rev 9846 : [mq]: par-scav-patch
rev 9847 : 8146987: Improve Parallel GC Full GC by caching results of live_words_in_range()
Summary: A large part of the time in the parallel full GC is spent computing the number of live words within memory ranges in order to determine where to move an object. Try to calculate this value incrementally by caching previous results.
Reviewed-by: tschatzl, mgerdin
Contributed-by: ray alex <sky1young@gmail.com>
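
The heart of the change, in outline: calc_new_pointer() asks the mark bitmap, via
ParMarkBitMap::live_words_in_range(cm, search_start, oop(addr)), how many live words lie
between a block boundary and the object being relocated. Successive queries issued by the
same worker often share that block-aligned start address and only reach further to the
right, so keeping the previous query's end point and result in the worker's
ParCompactionManager (reset via reset_cache_for_bitmap() before each full GC) lets most
queries count only the new tail instead of rescanning the whole range. The following is a
minimal standalone sketch of that caching idea only; LiveWordsCache, count_live() and this
simplified live_words_in_range() are illustrative stand-ins, not the HotSpot implementation.

  #include <cstddef>
  #include <cstdint>
  #include <iostream>
  #include <vector>

  // Hypothetical per-worker cache; the field names are illustrative only.
  struct LiveWordsCache {
    size_t last_beg = SIZE_MAX;  // start index of the previous query
    size_t last_end = SIZE_MAX;  // end index of the previous query
    size_t last_ret = 0;         // live words counted by the previous query
  };

  // Stand-in for the mark-bitmap scan: count live slots in [beg, end).
  static size_t count_live(const std::vector<bool>& live, size_t beg, size_t end) {
    size_t n = 0;
    for (size_t i = beg; i < end; ++i) {
      n += live[i] ? 1 : 0;
    }
    return n;
  }

  // Cached variant: if the new query starts where the previous one did and
  // reaches at least as far, only the tail [last_end, end) is scanned.
  static size_t live_words_in_range(const std::vector<bool>& live,
                                    LiveWordsCache& cache,
                                    size_t beg, size_t end) {
    size_t result;
    if (cache.last_beg == beg && cache.last_end <= end) {
      result = cache.last_ret + count_live(live, cache.last_end, end);
    } else {
      result = count_live(live, beg, end);  // cache miss: full rescan
    }
    cache.last_beg = beg;
    cache.last_end = end;
    cache.last_ret = result;
    return result;
  }

  int main() {
    std::vector<bool> live = {true, true, false, true, false, false,
                              true, true, true, false, true, false};
    LiveWordsCache cache;
    // Queries share the same (block-aligned) start and grow to the right,
    // which is exactly the pattern the cache targets.
    std::cout << live_words_in_range(live, cache, 0, 4)  << "\n";  // full scan: 3
    std::cout << live_words_in_range(live, cache, 0, 9)  << "\n";  // scans only [4, 9): 6
    std::cout << live_words_in_range(live, cache, 0, 12) << "\n";  // scans only [9, 12): 7
    return 0;
  }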

   1 /*
   2  * Copyright (c) 2005, 2016, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "classfile/stringTable.hpp"
  27 #include "classfile/symbolTable.hpp"
  28 #include "classfile/systemDictionary.hpp"
  29 #include "code/codeCache.hpp"
  30 #include "gc/parallel/gcTaskManager.hpp"
  31 #include "gc/parallel/parallelScavengeHeap.inline.hpp"
  32 #include "gc/parallel/pcTasks.hpp"
  33 #include "gc/parallel/psAdaptiveSizePolicy.hpp"
  34 #include "gc/parallel/psCompactionManager.inline.hpp"
  35 #include "gc/parallel/psMarkSweep.hpp"
  36 #include "gc/parallel/psMarkSweepDecorator.hpp"
  37 #include "gc/parallel/psOldGen.hpp"
  38 #include "gc/parallel/psParallelCompact.inline.hpp"
  39 #include "gc/parallel/psPromotionManager.inline.hpp"
  40 #include "gc/parallel/psScavenge.hpp"
  41 #include "gc/parallel/psYoungGen.hpp"
  42 #include "gc/shared/gcCause.hpp"
  43 #include "gc/shared/gcHeapSummary.hpp"
  44 #include "gc/shared/gcId.hpp"
  45 #include "gc/shared/gcLocker.inline.hpp"
  46 #include "gc/shared/gcTimer.hpp"
  47 #include "gc/shared/gcTrace.hpp"
  48 #include "gc/shared/gcTraceTime.inline.hpp"
  49 #include "gc/shared/isGCActiveMark.hpp"
  50 #include "gc/shared/referencePolicy.hpp"
  51 #include "gc/shared/referenceProcessor.hpp"
  52 #include "gc/shared/spaceDecorator.hpp"
  53 #include "logging/log.hpp"
  54 #include "oops/instanceKlass.inline.hpp"
  55 #include "oops/instanceMirrorKlass.inline.hpp"
  56 #include "oops/methodData.hpp"
  57 #include "oops/objArrayKlass.inline.hpp"
  58 #include "oops/oop.inline.hpp"
  59 #include "runtime/atomic.inline.hpp"
  60 #include "runtime/fprofiler.hpp"
  61 #include "runtime/safepoint.hpp"
  62 #include "runtime/vmThread.hpp"
  63 #include "services/management.hpp"
  64 #include "services/memTracker.hpp"
  65 #include "services/memoryService.hpp"
  66 #include "utilities/events.hpp"
  67 #include "utilities/stack.inline.hpp"
  68 
  69 #include <math.h>
  70 
  71 // All sizes are in HeapWords.
  72 const size_t ParallelCompactData::Log2RegionSize  = 16; // 64K words
  73 const size_t ParallelCompactData::RegionSize      = (size_t)1 << Log2RegionSize;
  74 const size_t ParallelCompactData::RegionSizeBytes =
  75   RegionSize << LogHeapWordSize;
  76 const size_t ParallelCompactData::RegionSizeOffsetMask = RegionSize - 1;
  77 const size_t ParallelCompactData::RegionAddrOffsetMask = RegionSizeBytes - 1;
  78 const size_t ParallelCompactData::RegionAddrMask       = ~RegionAddrOffsetMask;
  79 
  80 const size_t ParallelCompactData::Log2BlockSize   = 7; // 128 words
  81 const size_t ParallelCompactData::BlockSize       = (size_t)1 << Log2BlockSize;
  82 const size_t ParallelCompactData::BlockSizeBytes  =
  83   BlockSize << LogHeapWordSize;
  84 const size_t ParallelCompactData::BlockSizeOffsetMask = BlockSize - 1;
  85 const size_t ParallelCompactData::BlockAddrOffsetMask = BlockSizeBytes - 1;
  86 const size_t ParallelCompactData::BlockAddrMask       = ~BlockAddrOffsetMask;
  87 
  88 const size_t ParallelCompactData::BlocksPerRegion = RegionSize / BlockSize;
  89 const size_t ParallelCompactData::Log2BlocksPerRegion =
  90   Log2RegionSize - Log2BlockSize;
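      // Illustrative arithmetic (assuming 8-byte HeapWords, i.e. a 64-bit VM):
      // RegionSize = 2^16 = 65536 words = 512 KiB per region and BlockSize =
      // 2^7 = 128 words = 1 KiB per block, so BlocksPerRegion = 65536 / 128 =
      // 512 and Log2BlocksPerRegion = 16 - 7 = 9.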
  91 
  92 const ParallelCompactData::RegionData::region_sz_t
  93 ParallelCompactData::RegionData::dc_shift = 27;
  94 
  95 const ParallelCompactData::RegionData::region_sz_t
  96 ParallelCompactData::RegionData::dc_mask = ~0U << dc_shift;
  97 
  98 const ParallelCompactData::RegionData::region_sz_t
  99 ParallelCompactData::RegionData::dc_one = 0x1U << dc_shift;
 100 
 101 const ParallelCompactData::RegionData::region_sz_t
 102 ParallelCompactData::RegionData::los_mask = ~dc_mask;
 103 
 104 const ParallelCompactData::RegionData::region_sz_t
 105 ParallelCompactData::RegionData::dc_claimed = 0x8U << dc_shift;
 106 
 107 const ParallelCompactData::RegionData::region_sz_t
 108 ParallelCompactData::RegionData::dc_completed = 0xcU << dc_shift;
 109 
 110 SpaceInfo PSParallelCompact::_space_info[PSParallelCompact::last_space_id];
 111 
 112 ReferenceProcessor* PSParallelCompact::_ref_processor = NULL;
 113 
 114 double PSParallelCompact::_dwl_mean;
 115 double PSParallelCompact::_dwl_std_dev;
 116 double PSParallelCompact::_dwl_first_term;
 117 double PSParallelCompact::_dwl_adjustment;
 118 #ifdef  ASSERT
 119 bool   PSParallelCompact::_dwl_initialized = false;
 120 #endif  // #ifdef ASSERT
 121 
 122 void SplitInfo::record(size_t src_region_idx, size_t partial_obj_size,
 123                        HeapWord* destination)
 124 {
 125   assert(src_region_idx != 0, "invalid src_region_idx");
 126   assert(partial_obj_size != 0, "invalid partial_obj_size argument");
 127   assert(destination != NULL, "invalid destination argument");
 128 
 129   _src_region_idx = src_region_idx;
 130   _partial_obj_size = partial_obj_size;
 131   _destination = destination;
 132 
 133   // These fields may not be updated below, so make sure they're clear.
 134   assert(_dest_region_addr == NULL, "should have been cleared");
 135   assert(_first_src_addr == NULL, "should have been cleared");
 136 
 137   // Determine the number of destination regions for the partial object.
 138   HeapWord* const last_word = destination + partial_obj_size - 1;
 139   const ParallelCompactData& sd = PSParallelCompact::summary_data();
 140   HeapWord* const beg_region_addr = sd.region_align_down(destination);
 141   HeapWord* const end_region_addr = sd.region_align_down(last_word);
 142 
 143   if (beg_region_addr == end_region_addr) {
 144     // One destination region.
 145     _destination_count = 1;
 146     if (end_region_addr == destination) {
 147       // The destination falls on a region boundary, thus the first word of the
 148       // partial object will be the first word copied to the destination region.
 149       _dest_region_addr = end_region_addr;
 150       _first_src_addr = sd.region_to_addr(src_region_idx);
 151     }
 152   } else {
 153     // Two destination regions.  When copied, the partial object will cross a
 154     // destination region boundary, so a word somewhere within the partial
 155     // object will be the first word copied to the second destination region.
 156     _destination_count = 2;
 157     _dest_region_addr = end_region_addr;
 158     const size_t ofs = pointer_delta(end_region_addr, destination);
 159     assert(ofs < _partial_obj_size, "sanity");
 160     _first_src_addr = sd.region_to_addr(src_region_idx) + ofs;
 161   }
 162 }
 163 
 164 void SplitInfo::clear()
 165 {
 166   _src_region_idx = 0;
 167   _partial_obj_size = 0;
 168   _destination = NULL;
 169   _destination_count = 0;
 170   _dest_region_addr = NULL;
 171   _first_src_addr = NULL;
 172   assert(!is_valid(), "sanity");
 173 }
 174 
 175 #ifdef  ASSERT
 176 void SplitInfo::verify_clear()
 177 {
 178   assert(_src_region_idx == 0, "not clear");
 179   assert(_partial_obj_size == 0, "not clear");
 180   assert(_destination == NULL, "not clear");
 181   assert(_destination_count == 0, "not clear");
 182   assert(_dest_region_addr == NULL, "not clear");
 183   assert(_first_src_addr == NULL, "not clear");
 184 }
 185 #endif  // #ifdef ASSERT
 186 
 187 
 188 void PSParallelCompact::print_on_error(outputStream* st) {
 189   _mark_bitmap.print_on_error(st);
 190 }
 191 
 192 #ifndef PRODUCT
 193 const char* PSParallelCompact::space_names[] = {
 194   "old ", "eden", "from", "to  "
 195 };
 196 
 197 void PSParallelCompact::print_region_ranges() {
 198   if (!develop_log_is_enabled(Trace, gc, compaction, phases)) {
 199     return;
 200   }
 201   LogHandle(gc, compaction, phases) log;
 202   ResourceMark rm;
 203   Universe::print_on(log.trace_stream());
 204   log.trace("space  bottom     top        end        new_top");
 205   log.trace("------ ---------- ---------- ---------- ----------");
 206 
 207   for (unsigned int id = 0; id < last_space_id; ++id) {
 208     const MutableSpace* space = _space_info[id].space();
 209     log.trace("%u %s "
 210               SIZE_FORMAT_W(10) " " SIZE_FORMAT_W(10) " "
 211               SIZE_FORMAT_W(10) " " SIZE_FORMAT_W(10) " ",
 212               id, space_names[id],
 213               summary_data().addr_to_region_idx(space->bottom()),
 214               summary_data().addr_to_region_idx(space->top()),
 215               summary_data().addr_to_region_idx(space->end()),
 216               summary_data().addr_to_region_idx(_space_info[id].new_top()));
 217   }
 218 }
 219 
 220 void
 221 print_generic_summary_region(size_t i, const ParallelCompactData::RegionData* c)
 222 {
 223 #define REGION_IDX_FORMAT        SIZE_FORMAT_W(7)
 224 #define REGION_DATA_FORMAT       SIZE_FORMAT_W(5)
 225 
 226   ParallelCompactData& sd = PSParallelCompact::summary_data();
 227   size_t dci = c->destination() ? sd.addr_to_region_idx(c->destination()) : 0;
 228   log_develop_trace(gc, compaction, phases)(
 229       REGION_IDX_FORMAT " " PTR_FORMAT " "
 230       REGION_IDX_FORMAT " " PTR_FORMAT " "
 231       REGION_DATA_FORMAT " " REGION_DATA_FORMAT " "
 232       REGION_DATA_FORMAT " " REGION_IDX_FORMAT " %d",
 233       i, p2i(c->data_location()), dci, p2i(c->destination()),
 234       c->partial_obj_size(), c->live_obj_size(),
 235       c->data_size(), c->source_region(), c->destination_count());
 236 
 237 #undef  REGION_IDX_FORMAT
 238 #undef  REGION_DATA_FORMAT
 239 }
 240 
 241 void
 242 print_generic_summary_data(ParallelCompactData& summary_data,
 243                            HeapWord* const beg_addr,
 244                            HeapWord* const end_addr)
 245 {
 246   size_t total_words = 0;
 247   size_t i = summary_data.addr_to_region_idx(beg_addr);
 248   const size_t last = summary_data.addr_to_region_idx(end_addr);
 249   HeapWord* pdest = 0;
 250 
 251   while (i <= last) {
 252     ParallelCompactData::RegionData* c = summary_data.region(i);
 253     if (c->data_size() != 0 || c->destination() != pdest) {
 254       print_generic_summary_region(i, c);
 255       total_words += c->data_size();
 256       pdest = c->destination();
 257     }
 258     ++i;
 259   }
 260 
 261   log_develop_trace(gc, compaction, phases)("summary_data_bytes=" SIZE_FORMAT, total_words * HeapWordSize);
 262 }
 263 
 264 void
 265 print_generic_summary_data(ParallelCompactData& summary_data,
 266                            SpaceInfo* space_info)
 267 {
 268   if (!develop_log_is_enabled(Trace, gc, compaction, phases)) {
 269     return;
 270   }
 271 
 272   for (unsigned int id = 0; id < PSParallelCompact::last_space_id; ++id) {
 273     const MutableSpace* space = space_info[id].space();
 274     print_generic_summary_data(summary_data, space->bottom(),
 275                                MAX2(space->top(), space_info[id].new_top()));
 276   }
 277 }
 278 
 279 void
 280 print_initial_summary_data(ParallelCompactData& summary_data,
 281                            const MutableSpace* space) {
 282   if (space->top() == space->bottom()) {
 283     return;
 284   }
 285 
 286   const size_t region_size = ParallelCompactData::RegionSize;
 287   typedef ParallelCompactData::RegionData RegionData;
 288   HeapWord* const top_aligned_up = summary_data.region_align_up(space->top());
 289   const size_t end_region = summary_data.addr_to_region_idx(top_aligned_up);
 290   const RegionData* c = summary_data.region(end_region - 1);
 291   HeapWord* end_addr = c->destination() + c->data_size();
 292   const size_t live_in_space = pointer_delta(end_addr, space->bottom());
 293 
 294   // Print (and count) the full regions at the beginning of the space.
 295   size_t full_region_count = 0;
 296   size_t i = summary_data.addr_to_region_idx(space->bottom());
 297   while (i < end_region && summary_data.region(i)->data_size() == region_size) {
 298     ParallelCompactData::RegionData* c = summary_data.region(i);
 299     log_develop_trace(gc, compaction, phases)(
 300         SIZE_FORMAT_W(5) " " PTR_FORMAT " " SIZE_FORMAT_W(5) " " SIZE_FORMAT_W(5) " " SIZE_FORMAT_W(5) " " SIZE_FORMAT_W(5) " %d",
 301         i, p2i(c->destination()),
 302         c->partial_obj_size(), c->live_obj_size(),
 303         c->data_size(), c->source_region(), c->destination_count());
 304     ++full_region_count;
 305     ++i;
 306   }
 307 
 308   size_t live_to_right = live_in_space - full_region_count * region_size;
 309 
 310   double max_reclaimed_ratio = 0.0;
 311   size_t max_reclaimed_ratio_region = 0;
 312   size_t max_dead_to_right = 0;
 313   size_t max_live_to_right = 0;
 314 
 315   // Print the 'reclaimed ratio' for regions while there is something live in
 316   // the region or to the right of it.  The remaining regions are empty (and
 317   // uninteresting), and computing the ratio will result in division by 0.
 318   while (i < end_region && live_to_right > 0) {
 319     c = summary_data.region(i);
 320     HeapWord* const region_addr = summary_data.region_to_addr(i);
 321     const size_t used_to_right = pointer_delta(space->top(), region_addr);
 322     const size_t dead_to_right = used_to_right - live_to_right;
 323     const double reclaimed_ratio = double(dead_to_right) / live_to_right;
 324 
 325     if (reclaimed_ratio > max_reclaimed_ratio) {
  326       max_reclaimed_ratio = reclaimed_ratio;
  327       max_reclaimed_ratio_region = i;
  328       max_dead_to_right = dead_to_right;
  329       max_live_to_right = live_to_right;
 330     }
 331 
 332     ParallelCompactData::RegionData* c = summary_data.region(i);
 333     log_develop_trace(gc, compaction, phases)(
 334         SIZE_FORMAT_W(5) " " PTR_FORMAT " " SIZE_FORMAT_W(5) " " SIZE_FORMAT_W(5) " " SIZE_FORMAT_W(5) " " SIZE_FORMAT_W(5) " %d"
  335         " %12.10f " SIZE_FORMAT_W(10) " " SIZE_FORMAT_W(10),
 336         i, p2i(c->destination()),
 337         c->partial_obj_size(), c->live_obj_size(),
 338         c->data_size(), c->source_region(), c->destination_count(),
 339         reclaimed_ratio, dead_to_right, live_to_right);
 340 
 341 
 342     live_to_right -= c->data_size();
 343     ++i;
 344   }
 345 
 346   // Any remaining regions are empty.  Print one more if there is one.
 347   if (i < end_region) {
 348     ParallelCompactData::RegionData* c = summary_data.region(i);
 349     log_develop_trace(gc, compaction, phases)(
 350         SIZE_FORMAT_W(5) " " PTR_FORMAT " " SIZE_FORMAT_W(5) " " SIZE_FORMAT_W(5) " " SIZE_FORMAT_W(5) " " SIZE_FORMAT_W(5) " %d",
 351          i, p2i(c->destination()),
 352          c->partial_obj_size(), c->live_obj_size(),
 353          c->data_size(), c->source_region(), c->destination_count());
 354   }
 355 
 356   log_develop_trace(gc, compaction, phases)("max:  " SIZE_FORMAT_W(4) " d2r=" SIZE_FORMAT_W(10) " l2r=" SIZE_FORMAT_W(10) " max_ratio=%14.12f",
 357                                             max_reclaimed_ratio_region, max_dead_to_right, max_live_to_right, max_reclaimed_ratio);
 358 }
 359 
 360 void
 361 print_initial_summary_data(ParallelCompactData& summary_data,
 362                            SpaceInfo* space_info) {
 363   if (!develop_log_is_enabled(Trace, gc, compaction, phases)) {
 364     return;
 365   }
 366 
 367   unsigned int id = PSParallelCompact::old_space_id;
 368   const MutableSpace* space;
 369   do {
 370     space = space_info[id].space();
 371     print_initial_summary_data(summary_data, space);
 372   } while (++id < PSParallelCompact::eden_space_id);
 373 
 374   do {
 375     space = space_info[id].space();
 376     print_generic_summary_data(summary_data, space->bottom(), space->top());
 377   } while (++id < PSParallelCompact::last_space_id);
 378 }
 379 #endif  // #ifndef PRODUCT
 380 
 381 #ifdef  ASSERT
 382 size_t add_obj_count;
 383 size_t add_obj_size;
 384 size_t mark_bitmap_count;
 385 size_t mark_bitmap_size;
 386 #endif  // #ifdef ASSERT
 387 
 388 ParallelCompactData::ParallelCompactData()
 389 {
 390   _region_start = 0;
 391 
 392   _region_vspace = 0;
 393   _reserved_byte_size = 0;
 394   _region_data = 0;
 395   _region_count = 0;
 396 
 397   _block_vspace = 0;
 398   _block_data = 0;
 399   _block_count = 0;
 400 }
 401 
 402 bool ParallelCompactData::initialize(MemRegion covered_region)
 403 {
 404   _region_start = covered_region.start();
 405   const size_t region_size = covered_region.word_size();
 406   DEBUG_ONLY(_region_end = _region_start + region_size;)
 407 
 408   assert(region_align_down(_region_start) == _region_start,
 409          "region start not aligned");
 410   assert((region_size & RegionSizeOffsetMask) == 0,
 411          "region size not a multiple of RegionSize");
 412 
 413   bool result = initialize_region_data(region_size) && initialize_block_data();
 414   return result;
 415 }
 416 
 417 PSVirtualSpace*
 418 ParallelCompactData::create_vspace(size_t count, size_t element_size)
 419 {
 420   const size_t raw_bytes = count * element_size;
 421   const size_t page_sz = os::page_size_for_region_aligned(raw_bytes, 10);
 422   const size_t granularity = os::vm_allocation_granularity();
 423   _reserved_byte_size = align_size_up(raw_bytes, MAX2(page_sz, granularity));
 424 
 425   const size_t rs_align = page_sz == (size_t) os::vm_page_size() ? 0 :
 426     MAX2(page_sz, granularity);
 427   ReservedSpace rs(_reserved_byte_size, rs_align, rs_align > 0);
 428   os::trace_page_sizes("par compact", raw_bytes, raw_bytes, page_sz, rs.base(),
 429                        rs.size());
 430 
 431   MemTracker::record_virtual_memory_type((address)rs.base(), mtGC);
 432 
 433   PSVirtualSpace* vspace = new PSVirtualSpace(rs, page_sz);
 434   if (vspace != 0) {
 435     if (vspace->expand_by(_reserved_byte_size)) {
 436       return vspace;
 437     }
 438     delete vspace;
 439     // Release memory reserved in the space.
 440     rs.release();
 441   }
 442 
 443   return 0;
 444 }
 445 
 446 bool ParallelCompactData::initialize_region_data(size_t region_size)
 447 {
 448   const size_t count = (region_size + RegionSizeOffsetMask) >> Log2RegionSize;
 449   _region_vspace = create_vspace(count, sizeof(RegionData));
 450   if (_region_vspace != 0) {
 451     _region_data = (RegionData*)_region_vspace->reserved_low_addr();
 452     _region_count = count;
 453     return true;
 454   }
 455   return false;
 456 }
 457 
 458 bool ParallelCompactData::initialize_block_data()
 459 {
 460   assert(_region_count != 0, "region data must be initialized first");
 461   const size_t count = _region_count << Log2BlocksPerRegion;
 462   _block_vspace = create_vspace(count, sizeof(BlockData));
 463   if (_block_vspace != 0) {
 464     _block_data = (BlockData*)_block_vspace->reserved_low_addr();
 465     _block_count = count;
 466     return true;
 467   }
 468   return false;
 469 }
 470 
 471 void ParallelCompactData::clear()
 472 {
 473   memset(_region_data, 0, _region_vspace->committed_size());
 474   memset(_block_data, 0, _block_vspace->committed_size());
 475 }
 476 
 477 void ParallelCompactData::clear_range(size_t beg_region, size_t end_region) {
 478   assert(beg_region <= _region_count, "beg_region out of range");
 479   assert(end_region <= _region_count, "end_region out of range");
 480   assert(RegionSize % BlockSize == 0, "RegionSize not a multiple of BlockSize");
 481 
 482   const size_t region_cnt = end_region - beg_region;
 483   memset(_region_data + beg_region, 0, region_cnt * sizeof(RegionData));
 484 
 485   const size_t beg_block = beg_region * BlocksPerRegion;
 486   const size_t block_cnt = region_cnt * BlocksPerRegion;
 487   memset(_block_data + beg_block, 0, block_cnt * sizeof(BlockData));
 488 }
 489 
 490 HeapWord* ParallelCompactData::partial_obj_end(size_t region_idx) const
 491 {
 492   const RegionData* cur_cp = region(region_idx);
 493   const RegionData* const end_cp = region(region_count() - 1);
 494 
 495   HeapWord* result = region_to_addr(region_idx);
 496   if (cur_cp < end_cp) {
 497     do {
 498       result += cur_cp->partial_obj_size();
 499     } while (cur_cp->partial_obj_size() == RegionSize && ++cur_cp < end_cp);
 500   }
 501   return result;
 502 }
 503 
 504 void ParallelCompactData::add_obj(HeapWord* addr, size_t len)
 505 {
 506   const size_t obj_ofs = pointer_delta(addr, _region_start);
 507   const size_t beg_region = obj_ofs >> Log2RegionSize;
 508   const size_t end_region = (obj_ofs + len - 1) >> Log2RegionSize;
 509 
 510   DEBUG_ONLY(Atomic::inc_ptr(&add_obj_count);)
 511   DEBUG_ONLY(Atomic::add_ptr(len, &add_obj_size);)
 512 
 513   if (beg_region == end_region) {
 514     // All in one region.
 515     _region_data[beg_region].add_live_obj(len);
 516     return;
 517   }
 518 
 519   // First region.
 520   const size_t beg_ofs = region_offset(addr);
 521   _region_data[beg_region].add_live_obj(RegionSize - beg_ofs);
 522 
 523   Klass* klass = ((oop)addr)->klass();
 524   // Middle regions--completely spanned by this object.
 525   for (size_t region = beg_region + 1; region < end_region; ++region) {
 526     _region_data[region].set_partial_obj_size(RegionSize);
 527     _region_data[region].set_partial_obj_addr(addr);
 528   }
 529 
 530   // Last region.
 531   const size_t end_ofs = region_offset(addr + len - 1);
 532   _region_data[end_region].set_partial_obj_size(end_ofs + 1);
 533   _region_data[end_region].set_partial_obj_addr(addr);
 534 }
 535 
 536 void
 537 ParallelCompactData::summarize_dense_prefix(HeapWord* beg, HeapWord* end)
 538 {
 539   assert(region_offset(beg) == 0, "not RegionSize aligned");
 540   assert(region_offset(end) == 0, "not RegionSize aligned");
 541 
 542   size_t cur_region = addr_to_region_idx(beg);
 543   const size_t end_region = addr_to_region_idx(end);
 544   HeapWord* addr = beg;
 545   while (cur_region < end_region) {
 546     _region_data[cur_region].set_destination(addr);
 547     _region_data[cur_region].set_destination_count(0);
 548     _region_data[cur_region].set_source_region(cur_region);
 549     _region_data[cur_region].set_data_location(addr);
 550 
 551     // Update live_obj_size so the region appears completely full.
 552     size_t live_size = RegionSize - _region_data[cur_region].partial_obj_size();
 553     _region_data[cur_region].set_live_obj_size(live_size);
 554 
 555     ++cur_region;
 556     addr += RegionSize;
 557   }
 558 }
 559 
 560 // Find the point at which a space can be split and, if necessary, record the
 561 // split point.
 562 //
 563 // If the current src region (which overflowed the destination space) doesn't
 564 // have a partial object, the split point is at the beginning of the current src
 565 // region (an "easy" split, no extra bookkeeping required).
 566 //
 567 // If the current src region has a partial object, the split point is in the
 568 // region where that partial object starts (call it the split_region).  If
 569 // split_region has a partial object, then the split point is just after that
 570 // partial object (a "hard" split where we have to record the split data and
 571 // zero the partial_obj_size field).  With a "hard" split, we know that the
 572 // partial_obj ends within split_region because the partial object that caused
 573 // the overflow starts in split_region.  If split_region doesn't have a partial
 574 // obj, then the split is at the beginning of split_region (another "easy"
 575 // split).
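      //
      // Illustrative example: suppose object X starts in region R and its tail
      // spills into region R+1, and R+1 is the src region whose data overflows
      // the destination space.  R+1 has a partial object (X's tail), so the
      // split region is R.  If R itself begins with the tail of another object
      // that started in R-1, the split point is just after that tail (a "hard"
      // split, recorded in the SplitInfo); otherwise the split point is the
      // start of R (an "easy" split).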
 576 HeapWord*
 577 ParallelCompactData::summarize_split_space(size_t src_region,
 578                                            SplitInfo& split_info,
 579                                            HeapWord* destination,
 580                                            HeapWord* target_end,
 581                                            HeapWord** target_next)
 582 {
 583   assert(destination <= target_end, "sanity");
 584   assert(destination + _region_data[src_region].data_size() > target_end,
 585     "region should not fit into target space");
 586   assert(is_region_aligned(target_end), "sanity");
 587 
 588   size_t split_region = src_region;
 589   HeapWord* split_destination = destination;
 590   size_t partial_obj_size = _region_data[src_region].partial_obj_size();
 591 
 592   if (destination + partial_obj_size > target_end) {
 593     // The split point is just after the partial object (if any) in the
 594     // src_region that contains the start of the object that overflowed the
 595     // destination space.
 596     //
 597     // Find the start of the "overflow" object and set split_region to the
 598     // region containing it.
 599     HeapWord* const overflow_obj = _region_data[src_region].partial_obj_addr();
 600     split_region = addr_to_region_idx(overflow_obj);
 601 
 602     // Clear the source_region field of all destination regions whose first word
 603     // came from data after the split point (a non-null source_region field
 604     // implies a region must be filled).
 605     //
 606     // An alternative to the simple loop below:  clear during post_compact(),
 607     // which uses memcpy instead of individual stores, and is easy to
 608     // parallelize.  (The downside is that it clears the entire RegionData
 609     // object as opposed to just one field.)
 610     //
 611     // post_compact() would have to clear the summary data up to the highest
 612     // address that was written during the summary phase, which would be
 613     //
 614     //         max(top, max(new_top, clear_top))
 615     //
 616     // where clear_top is a new field in SpaceInfo.  Would have to set clear_top
 617     // to target_end.
 618     const RegionData* const sr = region(split_region);
 619     const size_t beg_idx =
 620       addr_to_region_idx(region_align_up(sr->destination() +
 621                                          sr->partial_obj_size()));
 622     const size_t end_idx = addr_to_region_idx(target_end);
 623 
 624     log_develop_trace(gc, compaction, phases)("split:  clearing source_region field in [" SIZE_FORMAT ", " SIZE_FORMAT ")", beg_idx, end_idx);
 625     for (size_t idx = beg_idx; idx < end_idx; ++idx) {
 626       _region_data[idx].set_source_region(0);
 627     }
 628 
 629     // Set split_destination and partial_obj_size to reflect the split region.
 630     split_destination = sr->destination();
 631     partial_obj_size = sr->partial_obj_size();
 632   }
 633 
 634   // The split is recorded only if a partial object extends onto the region.
 635   if (partial_obj_size != 0) {
 636     _region_data[split_region].set_partial_obj_size(0);
 637     split_info.record(split_region, partial_obj_size, split_destination);
 638   }
 639 
 640   // Setup the continuation addresses.
 641   *target_next = split_destination + partial_obj_size;
 642   HeapWord* const source_next = region_to_addr(split_region) + partial_obj_size;
 643 
 644   if (develop_log_is_enabled(Trace, gc, compaction, phases)) {
 645     const char * split_type = partial_obj_size == 0 ? "easy" : "hard";
 646     log_develop_trace(gc, compaction, phases)("%s split:  src=" PTR_FORMAT " src_c=" SIZE_FORMAT " pos=" SIZE_FORMAT,
 647                                               split_type, p2i(source_next), split_region, partial_obj_size);
 648     log_develop_trace(gc, compaction, phases)("%s split:  dst=" PTR_FORMAT " dst_c=" SIZE_FORMAT " tn=" PTR_FORMAT,
 649                                               split_type, p2i(split_destination),
 650                                               addr_to_region_idx(split_destination),
 651                                               p2i(*target_next));
 652 
 653     if (partial_obj_size != 0) {
 654       HeapWord* const po_beg = split_info.destination();
 655       HeapWord* const po_end = po_beg + split_info.partial_obj_size();
 656       log_develop_trace(gc, compaction, phases)("%s split:  po_beg=" PTR_FORMAT " " SIZE_FORMAT " po_end=" PTR_FORMAT " " SIZE_FORMAT,
 657                                                 split_type,
 658                                                 p2i(po_beg), addr_to_region_idx(po_beg),
 659                                                 p2i(po_end), addr_to_region_idx(po_end));
 660     }
 661   }
 662 
 663   return source_next;
 664 }
 665 
 666 bool ParallelCompactData::summarize(SplitInfo& split_info,
 667                                     HeapWord* source_beg, HeapWord* source_end,
 668                                     HeapWord** source_next,
 669                                     HeapWord* target_beg, HeapWord* target_end,
 670                                     HeapWord** target_next)
 671 {
 672   HeapWord* const source_next_val = source_next == NULL ? NULL : *source_next;
 673   log_develop_trace(gc, compaction, phases)(
 674       "sb=" PTR_FORMAT " se=" PTR_FORMAT " sn=" PTR_FORMAT
  675       " tb=" PTR_FORMAT " te=" PTR_FORMAT " tn=" PTR_FORMAT,
 676       p2i(source_beg), p2i(source_end), p2i(source_next_val),
 677       p2i(target_beg), p2i(target_end), p2i(*target_next));
 678 
 679   size_t cur_region = addr_to_region_idx(source_beg);
 680   const size_t end_region = addr_to_region_idx(region_align_up(source_end));
 681 
 682   HeapWord *dest_addr = target_beg;
 683   while (cur_region < end_region) {
 684     // The destination must be set even if the region has no data.
 685     _region_data[cur_region].set_destination(dest_addr);
 686 
 687     size_t words = _region_data[cur_region].data_size();
 688     if (words > 0) {
 689       // If cur_region does not fit entirely into the target space, find a point
 690       // at which the source space can be 'split' so that part is copied to the
 691       // target space and the rest is copied elsewhere.
 692       if (dest_addr + words > target_end) {
 693         assert(source_next != NULL, "source_next is NULL when splitting");
 694         *source_next = summarize_split_space(cur_region, split_info, dest_addr,
 695                                              target_end, target_next);
 696         return false;
 697       }
 698 
 699       // Compute the destination_count for cur_region, and if necessary, update
 700       // source_region for a destination region.  The source_region field is
 701       // updated if cur_region is the first (left-most) region to be copied to a
 702       // destination region.
 703       //
 704       // The destination_count calculation is a bit subtle.  A region that has
 705       // data that compacts into itself does not count itself as a destination.
 706       // This maintains the invariant that a zero count means the region is
 707       // available and can be claimed and then filled.
 708       uint destination_count = 0;
 709       if (split_info.is_split(cur_region)) {
 710         // The current region has been split:  the partial object will be copied
 711         // to one destination space and the remaining data will be copied to
 712         // another destination space.  Adjust the initial destination_count and,
 713         // if necessary, set the source_region field if the partial object will
 714         // cross a destination region boundary.
 715         destination_count = split_info.destination_count();
 716         if (destination_count == 2) {
 717           size_t dest_idx = addr_to_region_idx(split_info.dest_region_addr());
 718           _region_data[dest_idx].set_source_region(cur_region);
 719         }
 720       }
 721 
 722       HeapWord* const last_addr = dest_addr + words - 1;
 723       const size_t dest_region_1 = addr_to_region_idx(dest_addr);
 724       const size_t dest_region_2 = addr_to_region_idx(last_addr);
 725 
 726       // Initially assume that the destination regions will be the same and
 727       // adjust the value below if necessary.  Under this assumption, if
 728       // cur_region == dest_region_2, then cur_region will be compacted
 729       // completely into itself.
 730       destination_count += cur_region == dest_region_2 ? 0 : 1;
 731       if (dest_region_1 != dest_region_2) {
 732         // Destination regions differ; adjust destination_count.
 733         destination_count += 1;
 734         // Data from cur_region will be copied to the start of dest_region_2.
 735         _region_data[dest_region_2].set_source_region(cur_region);
 736       } else if (region_offset(dest_addr) == 0) {
 737         // Data from cur_region will be copied to the start of the destination
 738         // region.
 739         _region_data[dest_region_1].set_source_region(cur_region);
 740       }
 741 
 742       _region_data[cur_region].set_destination_count(destination_count);
 743       _region_data[cur_region].set_data_location(region_to_addr(cur_region));
 744       dest_addr += words;
 745     }
 746 
 747     ++cur_region;
 748   }
 749 
 750   *target_next = dest_addr;
 751   return true;
 752 }
 753 
 754 HeapWord* ParallelCompactData::calc_new_pointer(HeapWord* addr, ParCompactionManager* cm) {
 755   assert(addr != NULL, "Should detect NULL oop earlier");
 756   assert(ParallelScavengeHeap::heap()->is_in(addr), "not in heap");
 757   assert(PSParallelCompact::mark_bitmap()->is_marked(addr), "not marked");
 758 
 759   // Region covering the object.
 760   RegionData* const region_ptr = addr_to_region_ptr(addr);
 761   HeapWord* result = region_ptr->destination();
 762 
 763   // If the entire Region is live, the new location is region->destination + the
  764 // offset of the object within the Region.
 765 
 766   // Run some performance tests to determine if this special case pays off.  It
 767   // is worth it for pointers into the dense prefix.  If the optimization to
 768   // avoid pointer updates in regions that only point to the dense prefix is
 769   // ever implemented, this should be revisited.
 770   if (region_ptr->data_size() == RegionSize) {
 771     result += region_offset(addr);
 772     return result;
 773   }
 774 
 775   // Otherwise, the new location is region->destination + block offset + the
 776   // number of live words in the Block that are (a) to the left of addr and (b)
 777   // due to objects that start in the Block.
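        //
        // Worked example (illustrative numbers only): if the region's
        // destination is D, the block table records 200 live words in the
        // region before the block containing addr, and 30 live words belonging
        // to objects that start in that block lie to the left of addr, then
        // the new location of addr is D + 200 + 30.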
 778 
 779   // Fill in the block table if necessary.  This is unsynchronized, so multiple
 780   // threads may fill the block table for a region (harmless, since it is
 781   // idempotent).
 782   if (!region_ptr->blocks_filled()) {
 783     PSParallelCompact::fill_blocks(addr_to_region_idx(addr));
 784     region_ptr->set_blocks_filled();
 785   }
 786 
 787   HeapWord* const search_start = block_align_down(addr);
 788   const size_t block_offset = addr_to_block_ptr(addr)->offset();
 789 
 790   const ParMarkBitMap* bitmap = PSParallelCompact::mark_bitmap();
 791   const size_t live = bitmap->live_words_in_range(cm, search_start, oop(addr));
 792   result += block_offset + live;
 793   DEBUG_ONLY(PSParallelCompact::check_new_location(addr, result));
 794   return result;
 795 }
 796 
 797 #ifdef ASSERT
 798 void ParallelCompactData::verify_clear(const PSVirtualSpace* vspace)
 799 {
 800   const size_t* const beg = (const size_t*)vspace->committed_low_addr();
 801   const size_t* const end = (const size_t*)vspace->committed_high_addr();
 802   for (const size_t* p = beg; p < end; ++p) {
 803     assert(*p == 0, "not zero");
 804   }
 805 }
 806 
 807 void ParallelCompactData::verify_clear()
 808 {
 809   verify_clear(_region_vspace);
 810   verify_clear(_block_vspace);
 811 }
 812 #endif  // #ifdef ASSERT
 813 
 814 STWGCTimer          PSParallelCompact::_gc_timer;
 815 ParallelOldTracer   PSParallelCompact::_gc_tracer;
 816 elapsedTimer        PSParallelCompact::_accumulated_time;
 817 unsigned int        PSParallelCompact::_total_invocations = 0;
 818 unsigned int        PSParallelCompact::_maximum_compaction_gc_num = 0;
 819 jlong               PSParallelCompact::_time_of_last_gc = 0;
 820 CollectorCounters*  PSParallelCompact::_counters = NULL;
 821 ParMarkBitMap       PSParallelCompact::_mark_bitmap;
 822 ParallelCompactData PSParallelCompact::_summary_data;
 823 
 824 PSParallelCompact::IsAliveClosure PSParallelCompact::_is_alive_closure;
 825 
 826 bool PSParallelCompact::IsAliveClosure::do_object_b(oop p) { return mark_bitmap()->is_marked(p); }
 827 
 828 void PSParallelCompact::AdjustKlassClosure::do_klass(Klass* klass) {
 829   PSParallelCompact::AdjustPointerClosure closure(_cm);
 830   klass->oops_do(&closure);
 831 }
 832 
 833 void PSParallelCompact::post_initialize() {
 834   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
 835   MemRegion mr = heap->reserved_region();
 836   _ref_processor =
 837     new ReferenceProcessor(mr,            // span
 838                            ParallelRefProcEnabled && (ParallelGCThreads > 1), // mt processing
 839                            ParallelGCThreads, // mt processing degree
 840                            true,              // mt discovery
 841                            ParallelGCThreads, // mt discovery degree
 842                            true,              // atomic_discovery
 843                            &_is_alive_closure); // non-header is alive closure
 844   _counters = new CollectorCounters("PSParallelCompact", 1);
 845 
 846   // Initialize static fields in ParCompactionManager.
 847   ParCompactionManager::initialize(mark_bitmap());
 848 }
 849 
 850 bool PSParallelCompact::initialize() {
 851   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
 852   MemRegion mr = heap->reserved_region();
 853 
  854   // Was the old gen allocated successfully?
 855   if (!heap->old_gen()->is_allocated()) {
 856     return false;
 857   }
 858 
 859   initialize_space_info();
 860   initialize_dead_wood_limiter();
 861 
 862   if (!_mark_bitmap.initialize(mr)) {
 863     vm_shutdown_during_initialization(
 864       err_msg("Unable to allocate " SIZE_FORMAT "KB bitmaps for parallel "
 865       "garbage collection for the requested " SIZE_FORMAT "KB heap.",
 866       _mark_bitmap.reserved_byte_size()/K, mr.byte_size()/K));
 867     return false;
 868   }
 869 
 870   if (!_summary_data.initialize(mr)) {
 871     vm_shutdown_during_initialization(
 872       err_msg("Unable to allocate " SIZE_FORMAT "KB card tables for parallel "
 873       "garbage collection for the requested " SIZE_FORMAT "KB heap.",
 874       _summary_data.reserved_byte_size()/K, mr.byte_size()/K));
 875     return false;
 876   }
 877 
 878   return true;
 879 }
 880 
 881 void PSParallelCompact::initialize_space_info()
 882 {
 883   memset(&_space_info, 0, sizeof(_space_info));
 884 
 885   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
 886   PSYoungGen* young_gen = heap->young_gen();
 887 
 888   _space_info[old_space_id].set_space(heap->old_gen()->object_space());
 889   _space_info[eden_space_id].set_space(young_gen->eden_space());
 890   _space_info[from_space_id].set_space(young_gen->from_space());
 891   _space_info[to_space_id].set_space(young_gen->to_space());
 892 
 893   _space_info[old_space_id].set_start_array(heap->old_gen()->start_array());
 894 }
 895 
 896 void PSParallelCompact::initialize_dead_wood_limiter()
 897 {
 898   const size_t max = 100;
 899   _dwl_mean = double(MIN2(ParallelOldDeadWoodLimiterMean, max)) / 100.0;
 900   _dwl_std_dev = double(MIN2(ParallelOldDeadWoodLimiterStdDev, max)) / 100.0;
 901   _dwl_first_term = 1.0 / (sqrt(2.0 * M_PI) * _dwl_std_dev);
 902   DEBUG_ONLY(_dwl_initialized = true;)
 903   _dwl_adjustment = normal_distribution(1.0);
 904 }
 905 
 906 void
 907 PSParallelCompact::clear_data_covering_space(SpaceId id)
 908 {
 909   // At this point, top is the value before GC, new_top() is the value that will
 910   // be set at the end of GC.  The marking bitmap is cleared to top; nothing
 911   // should be marked above top.  The summary data is cleared to the larger of
 912   // top & new_top.
 913   MutableSpace* const space = _space_info[id].space();
 914   HeapWord* const bot = space->bottom();
 915   HeapWord* const top = space->top();
 916   HeapWord* const max_top = MAX2(top, _space_info[id].new_top());
 917 
 918   const idx_t beg_bit = _mark_bitmap.addr_to_bit(bot);
 919   const idx_t end_bit = BitMap::word_align_up(_mark_bitmap.addr_to_bit(top));
 920   _mark_bitmap.clear_range(beg_bit, end_bit);
 921 
 922   const size_t beg_region = _summary_data.addr_to_region_idx(bot);
 923   const size_t end_region =
 924     _summary_data.addr_to_region_idx(_summary_data.region_align_up(max_top));
 925   _summary_data.clear_range(beg_region, end_region);
 926 
 927   // Clear the data used to 'split' regions.
 928   SplitInfo& split_info = _space_info[id].split_info();
 929   if (split_info.is_valid()) {
 930     split_info.clear();
 931   }
 932   DEBUG_ONLY(split_info.verify_clear();)
 933 }
 934 
 935 void PSParallelCompact::pre_compact()
 936 {
 937   // Update the from & to space pointers in space_info, since they are swapped
 938   // at each young gen gc.  Do the update unconditionally (even though a
 939   // promotion failure does not swap spaces) because an unknown number of young
 940   // collections will have swapped the spaces an unknown number of times.
 941   GCTraceTime(Trace, gc, phases) tm("Pre Compact", &_gc_timer);
 942   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
 943   _space_info[from_space_id].set_space(heap->young_gen()->from_space());
 944   _space_info[to_space_id].set_space(heap->young_gen()->to_space());
 945 
 946   DEBUG_ONLY(add_obj_count = add_obj_size = 0;)
 947   DEBUG_ONLY(mark_bitmap_count = mark_bitmap_size = 0;)
 948 
 949   // Increment the invocation count
 950   heap->increment_total_collections(true);
 951 
 952   // We need to track unique mark sweep invocations as well.
 953   _total_invocations++;
 954 
 955   heap->print_heap_before_gc();
 956   heap->trace_heap_before_gc(&_gc_tracer);
 957 
 958   // Fill in TLABs
 959   heap->accumulate_statistics_all_tlabs();
 960   heap->ensure_parsability(true);  // retire TLABs
 961 
 962   if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
 963     HandleMark hm;  // Discard invalid handles created during verification
 964     Universe::verify("Before GC");
 965   }
 966 
 967   // Verify object start arrays
 968   if (VerifyObjectStartArray &&
 969       VerifyBeforeGC) {
 970     heap->old_gen()->verify_object_start_array();
 971   }
 972 
 973   DEBUG_ONLY(mark_bitmap()->verify_clear();)
 974   DEBUG_ONLY(summary_data().verify_clear();)
 975 
 976   // Have worker threads release resources the next time they run a task.
 977   gc_task_manager()->release_all_resources();
 978 
 979   ParCompactionManager::reset_cache_for_bitmap();
 980 }
 981 
 982 void PSParallelCompact::post_compact()
 983 {
 984   GCTraceTime(Trace, gc, phases) tm("Post Compact", &_gc_timer);
 985 
 986   for (unsigned int id = old_space_id; id < last_space_id; ++id) {
 987     // Clear the marking bitmap, summary data and split info.
 988     clear_data_covering_space(SpaceId(id));
 989     // Update top().  Must be done after clearing the bitmap and summary data.
 990     _space_info[id].publish_new_top();
 991   }
 992 
 993   MutableSpace* const eden_space = _space_info[eden_space_id].space();
 994   MutableSpace* const from_space = _space_info[from_space_id].space();
 995   MutableSpace* const to_space   = _space_info[to_space_id].space();
 996 
 997   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
 998   bool eden_empty = eden_space->is_empty();
 999   if (!eden_empty) {
1000     eden_empty = absorb_live_data_from_eden(heap->size_policy(),
1001                                             heap->young_gen(), heap->old_gen());
1002   }
1003 
1004   // Update heap occupancy information which is used as input to the soft ref
1005   // clearing policy at the next gc.
1006   Universe::update_heap_info_at_gc();
1007 
1008   bool young_gen_empty = eden_empty && from_space->is_empty() &&
1009     to_space->is_empty();
1010 
1011   ModRefBarrierSet* modBS = barrier_set_cast<ModRefBarrierSet>(heap->barrier_set());
1012   MemRegion old_mr = heap->old_gen()->reserved();
1013   if (young_gen_empty) {
1014     modBS->clear(MemRegion(old_mr.start(), old_mr.end()));
1015   } else {
1016     modBS->invalidate(MemRegion(old_mr.start(), old_mr.end()));
1017   }
1018 
1019   // Delete metaspaces for unloaded class loaders and clean up loader_data graph
1020   ClassLoaderDataGraph::purge();
1021   MetaspaceAux::verify_metrics();
1022 
1023   CodeCache::gc_epilogue();
1024   JvmtiExport::gc_epilogue();
1025 
1026 #if defined(COMPILER2) || INCLUDE_JVMCI
1027   DerivedPointerTable::update_pointers();
1028 #endif
1029 
1030   ref_processor()->enqueue_discovered_references(NULL);
1031 
1032   if (ZapUnusedHeapArea) {
1033     heap->gen_mangle_unused_area();
1034   }
1035 
1036   // Update time of last GC
1037   reset_millis_since_last_gc();
1038 }
1039 
1040 HeapWord*
1041 PSParallelCompact::compute_dense_prefix_via_density(const SpaceId id,
1042                                                     bool maximum_compaction)
1043 {
1044   const size_t region_size = ParallelCompactData::RegionSize;
1045   const ParallelCompactData& sd = summary_data();
1046 
1047   const MutableSpace* const space = _space_info[id].space();
1048   HeapWord* const top_aligned_up = sd.region_align_up(space->top());
1049   const RegionData* const beg_cp = sd.addr_to_region_ptr(space->bottom());
1050   const RegionData* const end_cp = sd.addr_to_region_ptr(top_aligned_up);
1051 
1052   // Skip full regions at the beginning of the space--they are necessarily part
1053   // of the dense prefix.
1054   size_t full_count = 0;
1055   const RegionData* cp;
1056   for (cp = beg_cp; cp < end_cp && cp->data_size() == region_size; ++cp) {
1057     ++full_count;
1058   }
1059 
1060   assert(total_invocations() >= _maximum_compaction_gc_num, "sanity");
1061   const size_t gcs_since_max = total_invocations() - _maximum_compaction_gc_num;
1062   const bool interval_ended = gcs_since_max > HeapMaximumCompactionInterval;
1063   if (maximum_compaction || cp == end_cp || interval_ended) {
1064     _maximum_compaction_gc_num = total_invocations();
1065     return sd.region_to_addr(cp);
1066   }
1067 
1068   HeapWord* const new_top = _space_info[id].new_top();
1069   const size_t space_live = pointer_delta(new_top, space->bottom());
1070   const size_t space_used = space->used_in_words();
1071   const size_t space_capacity = space->capacity_in_words();
1072 
1073   const double cur_density = double(space_live) / space_capacity;
1074   const double deadwood_density =
1075     (1.0 - cur_density) * (1.0 - cur_density) * cur_density * cur_density;
1076   const size_t deadwood_goal = size_t(space_capacity * deadwood_density);
1077 
1078   if (TraceParallelOldGCDensePrefix) {
1079     tty->print_cr("cur_dens=%5.3f dw_dens=%5.3f dw_goal=" SIZE_FORMAT,
1080                   cur_density, deadwood_density, deadwood_goal);
1081     tty->print_cr("space_live=" SIZE_FORMAT " " "space_used=" SIZE_FORMAT " "
1082                   "space_cap=" SIZE_FORMAT,
1083                   space_live, space_used,
1084                   space_capacity);
1085   }
1086 
1087   // XXX - Use binary search?
1088   HeapWord* dense_prefix = sd.region_to_addr(cp);
1089   const RegionData* full_cp = cp;
1090   const RegionData* const top_cp = sd.addr_to_region_ptr(space->top() - 1);
1091   while (cp < end_cp) {
1092     HeapWord* region_destination = cp->destination();
1093     const size_t cur_deadwood = pointer_delta(dense_prefix, region_destination);
1094     if (TraceParallelOldGCDensePrefix && Verbose) {
1095       tty->print_cr("c#=" SIZE_FORMAT_W(4) " dst=" PTR_FORMAT " "
1096                     "dp=" PTR_FORMAT " " "cdw=" SIZE_FORMAT_W(8),
1097                     sd.region(cp), p2i(region_destination),
1098                     p2i(dense_prefix), cur_deadwood);
1099     }
1100 
1101     if (cur_deadwood >= deadwood_goal) {
1102       // Found the region that has the correct amount of deadwood to the left.
1103       // This typically occurs after crossing a fairly sparse set of regions, so
1104       // iterate backwards over those sparse regions, looking for the region
1105       // that has the lowest density of live objects 'to the right.'
1106       size_t space_to_left = sd.region(cp) * region_size;
1107       size_t live_to_left = space_to_left - cur_deadwood;
1108       size_t space_to_right = space_capacity - space_to_left;
1109       size_t live_to_right = space_live - live_to_left;
1110       double density_to_right = double(live_to_right) / space_to_right;
1111       while (cp > full_cp) {
1112         --cp;
1113         const size_t prev_region_live_to_right = live_to_right -
1114           cp->data_size();
1115         const size_t prev_region_space_to_right = space_to_right + region_size;
1116         double prev_region_density_to_right =
1117           double(prev_region_live_to_right) / prev_region_space_to_right;
1118         if (density_to_right <= prev_region_density_to_right) {
1119           return dense_prefix;
1120         }
1121         if (TraceParallelOldGCDensePrefix && Verbose) {
1122           tty->print_cr("backing up from c=" SIZE_FORMAT_W(4) " d2r=%10.8f "
1123                         "pc_d2r=%10.8f", sd.region(cp), density_to_right,
1124                         prev_region_density_to_right);
1125         }
1126         dense_prefix -= region_size;
1127         live_to_right = prev_region_live_to_right;
1128         space_to_right = prev_region_space_to_right;
1129         density_to_right = prev_region_density_to_right;
1130       }
1131       return dense_prefix;
1132     }
1133 
1134     dense_prefix += region_size;
1135     ++cp;
1136   }
1137 
1138   return dense_prefix;
1139 }
1140 
1141 #ifndef PRODUCT
1142 void PSParallelCompact::print_dense_prefix_stats(const char* const algorithm,
1143                                                  const SpaceId id,
1144                                                  const bool maximum_compaction,
1145                                                  HeapWord* const addr)
1146 {
1147   const size_t region_idx = summary_data().addr_to_region_idx(addr);
1148   RegionData* const cp = summary_data().region(region_idx);
1149   const MutableSpace* const space = _space_info[id].space();
1150   HeapWord* const new_top = _space_info[id].new_top();
1151 
1152   const size_t space_live = pointer_delta(new_top, space->bottom());
1153   const size_t dead_to_left = pointer_delta(addr, cp->destination());
1154   const size_t space_cap = space->capacity_in_words();
1155   const double dead_to_left_pct = double(dead_to_left) / space_cap;
1156   const size_t live_to_right = new_top - cp->destination();
1157   const size_t dead_to_right = space->top() - addr - live_to_right;
1158 
1159   tty->print_cr("%s=" PTR_FORMAT " dpc=" SIZE_FORMAT_W(5) " "
1160                 "spl=" SIZE_FORMAT " "
1161                 "d2l=" SIZE_FORMAT " d2l%%=%6.4f "
1162                 "d2r=" SIZE_FORMAT " l2r=" SIZE_FORMAT
1163                 " ratio=%10.8f",
1164                 algorithm, p2i(addr), region_idx,
1165                 space_live,
1166                 dead_to_left, dead_to_left_pct,
1167                 dead_to_right, live_to_right,
1168                 double(dead_to_right) / live_to_right);
1169 }
1170 #endif  // #ifndef PRODUCT
1171 
1172 // Return a fraction indicating how much of the generation can be treated as
1173 // "dead wood" (i.e., not reclaimed).  The function uses a normal distribution
1174 // based on the density of live objects in the generation to determine a limit,
1175 // which is then adjusted so the return value is min_percent when the density is
1176 // 1.
1177 //
 1178 // The following table shows some return values for different values of the
1179 // standard deviation (ParallelOldDeadWoodLimiterStdDev); the mean is 0.5 and
1180 // min_percent is 1.
1181 //
1182 //                          fraction allowed as dead wood
1183 //         -----------------------------------------------------------------
1184 // density std_dev=70 std_dev=75 std_dev=80 std_dev=85 std_dev=90 std_dev=95
1185 // ------- ---------- ---------- ---------- ---------- ---------- ----------
1186 // 0.00000 0.01000000 0.01000000 0.01000000 0.01000000 0.01000000 0.01000000
1187 // 0.05000 0.03193096 0.02836880 0.02550828 0.02319280 0.02130337 0.01974941
1188 // 0.10000 0.05247504 0.04547452 0.03988045 0.03537016 0.03170171 0.02869272
1189 // 0.15000 0.07135702 0.06111390 0.05296419 0.04641639 0.04110601 0.03676066
1190 // 0.20000 0.08831616 0.07509618 0.06461766 0.05622444 0.04943437 0.04388975
1191 // 0.25000 0.10311208 0.08724696 0.07471205 0.06469760 0.05661313 0.05002313
1192 // 0.30000 0.11553050 0.09741183 0.08313394 0.07175114 0.06257797 0.05511132
1193 // 0.35000 0.12538832 0.10545958 0.08978741 0.07731366 0.06727491 0.05911289
1194 // 0.40000 0.13253818 0.11128511 0.09459590 0.08132834 0.07066107 0.06199500
1195 // 0.45000 0.13687208 0.11481163 0.09750361 0.08375387 0.07270534 0.06373386
1196 // 0.50000 0.13832410 0.11599237 0.09847664 0.08456518 0.07338887 0.06431510
1197 // 0.55000 0.13687208 0.11481163 0.09750361 0.08375387 0.07270534 0.06373386
1198 // 0.60000 0.13253818 0.11128511 0.09459590 0.08132834 0.07066107 0.06199500
1199 // 0.65000 0.12538832 0.10545958 0.08978741 0.07731366 0.06727491 0.05911289
1200 // 0.70000 0.11553050 0.09741183 0.08313394 0.07175114 0.06257797 0.05511132
1201 // 0.75000 0.10311208 0.08724696 0.07471205 0.06469760 0.05661313 0.05002313
1202 // 0.80000 0.08831616 0.07509618 0.06461766 0.05622444 0.04943437 0.04388975
1203 // 0.85000 0.07135702 0.06111390 0.05296419 0.04641639 0.04110601 0.03676066
1204 // 0.90000 0.05247504 0.04547452 0.03988045 0.03537016 0.03170171 0.02869272
1205 // 0.95000 0.03193096 0.02836880 0.02550828 0.02319280 0.02130337 0.01974941
1206 // 1.00000 0.01000000 0.01000000 0.01000000 0.01000000 0.01000000 0.01000000
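      //
      // In formula form (matching the code below): the returned limit is
      // N(density) - N(1.0) + min_percent/100, clamped at 0.0, where N is the
      // normal density with mean ParallelOldDeadWoodLimiterMean/100 and
      // standard deviation ParallelOldDeadWoodLimiterStdDev/100.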
1207 
1208 double PSParallelCompact::dead_wood_limiter(double density, size_t min_percent)
1209 {
1210   assert(_dwl_initialized, "uninitialized");
1211 
1212   // The raw limit is the value of the normal distribution at x = density.
1213   const double raw_limit = normal_distribution(density);
1214 
1215   // Adjust the raw limit so it becomes the minimum when the density is 1.
1216   //
1217   // First subtract the adjustment value (which is simply the precomputed value
1218   // normal_distribution(1.0)); this yields a value of 0 when the density is 1.
1219   // Then add the minimum value, so the minimum is returned when the density is
1220   // 1.  Finally, prevent negative values, which occur when the mean is not 0.5.
1221   const double min = double(min_percent) / 100.0;
1222   const double limit = raw_limit - _dwl_adjustment + min;
1223   return MAX2(limit, 0.0);
1224 }
1225 
1226 ParallelCompactData::RegionData*
1227 PSParallelCompact::first_dead_space_region(const RegionData* beg,
1228                                            const RegionData* end)
1229 {
1230   const size_t region_size = ParallelCompactData::RegionSize;
1231   ParallelCompactData& sd = summary_data();
1232   size_t left = sd.region(beg);
1233   size_t right = end > beg ? sd.region(end) - 1 : left;
1234 
1235   // Binary search.
1236   while (left < right) {
1237     // Equivalent to (left + right) / 2, but does not overflow.
1238     const size_t middle = left + (right - left) / 2;
1239     RegionData* const middle_ptr = sd.region(middle);
1240     HeapWord* const dest = middle_ptr->destination();
1241     HeapWord* const addr = sd.region_to_addr(middle);
1242     assert(dest != NULL, "sanity");
1243     assert(dest <= addr, "must move left");
1244 
1245     if (middle > left && dest < addr) {
1246       right = middle - 1;
1247     } else if (middle < right && middle_ptr->data_size() == region_size) {
1248       left = middle + 1;
1249     } else {
1250       return middle_ptr;
1251     }
1252   }
1253   return sd.region(left);
1254 }
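
// Note on the midpoint computation: for index types near their maximum value,
// the naive (left + right) / 2 can wrap around, which is why the search uses
// left + (right - left) / 2.  A minimal standalone sketch of the difference,
// excluded from compilation and using hypothetical 32-bit indices far larger
// than any real region count:
#if 0  // Illustrative sketch; not part of this translation unit.
#include <cstdint>
#include <cstdio>

int main() {
  const uint32_t left  = 3000000000u;             // both indices fit in 32 bits...
  const uint32_t right = 3500000000u;
  const uint32_t naive = (left + right) / 2u;     // ...but their sum wraps around
  const uint32_t safe  = left + (right - left) / 2u;
  std::printf("naive=%u safe=%u\n", naive, safe); // naive=1102516352 safe=3250000000
  return 0;
}
#endif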
1255 
1256 ParallelCompactData::RegionData*
1257 PSParallelCompact::dead_wood_limit_region(const RegionData* beg,
1258                                           const RegionData* end,
1259                                           size_t dead_words)
1260 {
1261   ParallelCompactData& sd = summary_data();
1262   size_t left = sd.region(beg);
1263   size_t right = end > beg ? sd.region(end) - 1 : left;
1264 
1265   // Binary search.
1266   while (left < right) {
1267     // Equivalent to (left + right) / 2, but does not overflow.
1268     const size_t middle = left + (right - left) / 2;
1269     RegionData* const middle_ptr = sd.region(middle);
1270     HeapWord* const dest = middle_ptr->destination();
1271     HeapWord* const addr = sd.region_to_addr(middle);
1272     assert(dest != NULL, "sanity");
1273     assert(dest <= addr, "must move left");
1274 
1275     const size_t dead_to_left = pointer_delta(addr, dest);
1276     if (middle > left && dead_to_left > dead_words) {
1277       right = middle - 1;
1278     } else if (middle < right && dead_to_left < dead_words) {
1279       left = middle + 1;
1280     } else {
1281       return middle_ptr;
1282     }
1283   }
1284   return sd.region(left);
1285 }
1286 
1287 // The result is valid during the summary phase, after the initial summarization
1288 // of each space into itself, and before final summarization.
1289 inline double
1290 PSParallelCompact::reclaimed_ratio(const RegionData* const cp,
1291                                    HeapWord* const bottom,
1292                                    HeapWord* const top,
1293                                    HeapWord* const new_top)
1294 {
1295   ParallelCompactData& sd = summary_data();
1296 
1297   assert(cp != NULL, "sanity");
1298   assert(bottom != NULL, "sanity");
1299   assert(top != NULL, "sanity");
1300   assert(new_top != NULL, "sanity");
1301   assert(top >= new_top, "summary data problem?");
1302   assert(new_top > bottom, "space is empty; should not be here");
1303   assert(new_top >= cp->destination(), "sanity");
1304   assert(top >= sd.region_to_addr(cp), "sanity");
1305 
1306   HeapWord* const destination = cp->destination();
1307   const size_t dense_prefix_live  = pointer_delta(destination, bottom);
1308   const size_t compacted_region_live = pointer_delta(new_top, destination);
1309   const size_t compacted_region_used = pointer_delta(top,
1310                                                      sd.region_to_addr(cp));
1311   const size_t reclaimable = compacted_region_used - compacted_region_live;
1312 
1313   const double divisor = dense_prefix_live + 1.25 * compacted_region_live;
1314   return double(reclaimable) / divisor;
1315 }
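
// reclaimed_ratio() boils down to
//   reclaimable / (dense_prefix_live + 1.25 * compacted_region_live),
// so regions whose compaction would copy more live data score lower.  A quick
// worked example with hypothetical word counts (excluded from compilation):
#if 0  // Illustrative sketch; not part of this translation unit.
#include <cstdio>

int main() {
  const double dense_prefix_live     = 800.0;  // live words left in the dense prefix
  const double compacted_region_live = 500.0;  // live words that would be copied
  const double compacted_region_used = 900.0;  // words currently used above the candidate region
  const double reclaimable = compacted_region_used - compacted_region_live;  // 400
  const double ratio = reclaimable / (dense_prefix_live + 1.25 * compacted_region_live);
  std::printf("reclaimed ratio = %.4f\n", ratio);  // ~0.2807
  return 0;
}
#endif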
1316 
1317 // Return the address of the end of the dense prefix, a.k.a. the start of the
1318 // compacted region.  The address is always on a region boundary.
1319 //
1320 // Completely full regions at the left are skipped, since no compaction can
1321 // occur in those regions.  Then the maximum amount of dead wood to allow is
1322 // computed, based on the density (amount live / capacity) of the generation;
1323 // the region with approximately that amount of dead space to the left is
1324 // identified as the limit region.  Regions between the last completely full
1325 // region and the limit region are scanned and the one that has the best
1326 // (maximum) reclaimed_ratio() is selected.
1327 HeapWord*
1328 PSParallelCompact::compute_dense_prefix(const SpaceId id,
1329                                         bool maximum_compaction)
1330 {
1331   const size_t region_size = ParallelCompactData::RegionSize;
1332   const ParallelCompactData& sd = summary_data();
1333 
1334   const MutableSpace* const space = _space_info[id].space();
1335   HeapWord* const top = space->top();
1336   HeapWord* const top_aligned_up = sd.region_align_up(top);
1337   HeapWord* const new_top = _space_info[id].new_top();
1338   HeapWord* const new_top_aligned_up = sd.region_align_up(new_top);
1339   HeapWord* const bottom = space->bottom();
1340   const RegionData* const beg_cp = sd.addr_to_region_ptr(bottom);
1341   const RegionData* const top_cp = sd.addr_to_region_ptr(top_aligned_up);
1342   const RegionData* const new_top_cp =
1343     sd.addr_to_region_ptr(new_top_aligned_up);
1344 
1345   // Skip full regions at the beginning of the space--they are necessarily part
1346   // of the dense prefix.
1347   const RegionData* const full_cp = first_dead_space_region(beg_cp, new_top_cp);
1348   assert(full_cp->destination() == sd.region_to_addr(full_cp) ||
1349          space->is_empty(), "no dead space allowed to the left");
1350   assert(full_cp->data_size() < region_size || full_cp == new_top_cp - 1,
1351          "region must have dead space");
1352 
1353   // The gc number is saved whenever a maximum compaction is done, and used to
1354   // determine when the maximum compaction interval has expired.  This avoids
1355   // successive max compactions for different reasons.
1356   assert(total_invocations() >= _maximum_compaction_gc_num, "sanity");
1357   const size_t gcs_since_max = total_invocations() - _maximum_compaction_gc_num;
1358   const bool interval_ended = gcs_since_max > HeapMaximumCompactionInterval ||
1359     total_invocations() == HeapFirstMaximumCompactionCount;
1360   if (maximum_compaction || full_cp == top_cp || interval_ended) {
1361     _maximum_compaction_gc_num = total_invocations();
1362     return sd.region_to_addr(full_cp);
1363   }
1364 
1365   const size_t space_live = pointer_delta(new_top, bottom);
1366   const size_t space_used = space->used_in_words();
1367   const size_t space_capacity = space->capacity_in_words();
1368 
1369   const double density = double(space_live) / double(space_capacity);
1370   const size_t min_percent_free = MarkSweepDeadRatio;
1371   const double limiter = dead_wood_limiter(density, min_percent_free);
1372   const size_t dead_wood_max = space_used - space_live;
1373   const size_t dead_wood_limit = MIN2(size_t(space_capacity * limiter),
1374                                       dead_wood_max);
1375 
1376   if (TraceParallelOldGCDensePrefix) {
1377     tty->print_cr("space_live=" SIZE_FORMAT " " "space_used=" SIZE_FORMAT " "
1378                   "space_cap=" SIZE_FORMAT,
1379                   space_live, space_used,
1380                   space_capacity);
1381     tty->print_cr("dead_wood_limiter(%6.4f, " SIZE_FORMAT ")=%6.4f "
1382                   "dead_wood_max=" SIZE_FORMAT " dead_wood_limit=" SIZE_FORMAT,
1383                   density, min_percent_free, limiter,
1384                   dead_wood_max, dead_wood_limit);
1385   }
1386 
1387   // Locate the region with the desired amount of dead space to the left.
1388   const RegionData* const limit_cp =
1389     dead_wood_limit_region(full_cp, top_cp, dead_wood_limit);
1390 
1391   // Scan from the first region with dead space to the limit region and find the
1392   // one with the best (largest) reclaimed ratio.
1393   double best_ratio = 0.0;
1394   const RegionData* best_cp = full_cp;
1395   for (const RegionData* cp = full_cp; cp < limit_cp; ++cp) {
1396     double tmp_ratio = reclaimed_ratio(cp, bottom, top, new_top);
1397     if (tmp_ratio > best_ratio) {
1398       best_cp = cp;
1399       best_ratio = tmp_ratio;
1400     }
1401   }
1402 
1403   return sd.region_to_addr(best_cp);
1404 }
1405 
1406 void PSParallelCompact::summarize_spaces_quick()
1407 {
1408   for (unsigned int i = 0; i < last_space_id; ++i) {
1409     const MutableSpace* space = _space_info[i].space();
1410     HeapWord** nta = _space_info[i].new_top_addr();
1411     bool result = _summary_data.summarize(_space_info[i].split_info(),
1412                                           space->bottom(), space->top(), NULL,
1413                                           space->bottom(), space->end(), nta);
1414     assert(result, "space must fit into itself");
1415     _space_info[i].set_dense_prefix(space->bottom());
1416   }
1417 }
1418 
1419 void PSParallelCompact::fill_dense_prefix_end(SpaceId id)
1420 {
1421   HeapWord* const dense_prefix_end = dense_prefix(id);
1422   const RegionData* region = _summary_data.addr_to_region_ptr(dense_prefix_end);
1423   const idx_t dense_prefix_bit = _mark_bitmap.addr_to_bit(dense_prefix_end);
1424   if (dead_space_crosses_boundary(region, dense_prefix_bit)) {
1425     // Only enough dead space is filled so that any remaining dead space to the
1426     // left is larger than the minimum filler object.  (The remainder is filled
1427     // during the copy/update phase.)
1428     //
1429     // The size of the dead space to the right of the boundary is not a
1430     // concern, since compaction will be able to use whatever space is
1431     // available.
1432     //
1433     // Here '||' is the boundary, 'x' represents a don't care bit and a box
1434     // surrounds the space to be filled with an object.
1435     //
1436     // In the 32-bit VM, each bit represents two 32-bit words:
1437     //                              +---+
1438     // a) beg_bits:  ...  x   x   x | 0 | ||   0   x  x  ...
1439     //    end_bits:  ...  x   x   x | 0 | ||   0   x  x  ...
1440     //                              +---+
1441     //
1442     // In the 64-bit VM, each bit represents one 64-bit word:
1443     //                              +------------+
1444     // b) beg_bits:  ...  x   x   x | 0   ||   0 | x  x  ...
1445     //    end_bits:  ...  x   x   1 | 0   ||   0 | x  x  ...
1446     //                              +------------+
1447     //                          +-------+
1448     // c) beg_bits:  ...  x   x | 0   0 | ||   0   x  x  ...
1449     //    end_bits:  ...  x   1 | 0   0 | ||   0   x  x  ...
1450     //                          +-------+
1451     //                      +-----------+
1452     // d) beg_bits:  ...  x | 0   0   0 | ||   0   x  x  ...
1453     //    end_bits:  ...  1 | 0   0   0 | ||   0   x  x  ...
1454     //                      +-----------+
1455     //                          +-------+
1456     // e) beg_bits:  ...  0   0 | 0   0 | ||   0   x  x  ...
1457     //    end_bits:  ...  0   0 | 0   0 | ||   0   x  x  ...
1458     //                          +-------+
1459 
1460     // Initially assume case a, c or e will apply.
1461     size_t obj_len = CollectedHeap::min_fill_size();
1462     HeapWord* obj_beg = dense_prefix_end - obj_len;
1463 
1464 #ifdef  _LP64
1465     if (MinObjAlignment > 1) { // object alignment > heap word size
1466       // Cases a, c or e.
1467     } else if (_mark_bitmap.is_obj_end(dense_prefix_bit - 2)) {
1468       // Case b above.
1469       obj_beg = dense_prefix_end - 1;
1470     } else if (!_mark_bitmap.is_obj_end(dense_prefix_bit - 3) &&
1471                _mark_bitmap.is_obj_end(dense_prefix_bit - 4)) {
1472       // Case d above.
1473       obj_beg = dense_prefix_end - 3;
1474       obj_len = 3;
1475     }
1476 #endif  // #ifdef _LP64
1477 
1478     CollectedHeap::fill_with_object(obj_beg, obj_len);
1479     _mark_bitmap.mark_obj(obj_beg, obj_len);
1480     _summary_data.add_obj(obj_beg, obj_len);
1481     assert(start_array(id) != NULL, "sanity");
1482     start_array(id)->allocate_block(obj_beg);
1483   }
1484 }
1485 
1486 void
1487 PSParallelCompact::clear_source_region(HeapWord* beg_addr, HeapWord* end_addr)
1488 {
1489   RegionData* const beg_ptr = _summary_data.addr_to_region_ptr(beg_addr);
1490   HeapWord* const end_aligned_up = _summary_data.region_align_up(end_addr);
1491   RegionData* const end_ptr = _summary_data.addr_to_region_ptr(end_aligned_up);
1492   for (RegionData* cur = beg_ptr; cur < end_ptr; ++cur) {
1493     cur->set_source_region(0);
1494   }
1495 }
1496 
1497 void
1498 PSParallelCompact::summarize_space(SpaceId id, bool maximum_compaction)
1499 {
1500   assert(id < last_space_id, "id out of range");
1501   assert(_space_info[id].dense_prefix() == _space_info[id].space()->bottom(),
1502          "should have been reset in summarize_spaces_quick()");
1503 
1504   const MutableSpace* space = _space_info[id].space();
1505   if (_space_info[id].new_top() != space->bottom()) {
1506     HeapWord* dense_prefix_end = compute_dense_prefix(id, maximum_compaction);
1507     _space_info[id].set_dense_prefix(dense_prefix_end);
1508 
1509 #ifndef PRODUCT
1510     if (TraceParallelOldGCDensePrefix) {
1511       print_dense_prefix_stats("ratio", id, maximum_compaction,
1512                                dense_prefix_end);
1513       HeapWord* addr = compute_dense_prefix_via_density(id, maximum_compaction);
1514       print_dense_prefix_stats("density", id, maximum_compaction, addr);
1515     }
1516 #endif  // #ifndef PRODUCT
1517 
1518     // Recompute the summary data, taking into account the dense prefix.  If
1519     // every last byte will be reclaimed, then the existing summary data which
1520     // compacts everything can be left in place.
1521     if (!maximum_compaction && dense_prefix_end != space->bottom()) {
1522       // If dead space crosses the dense prefix boundary, it is (at least
1523       // partially) filled with a dummy object, marked live and added to the
1524       // summary data.  This simplifies the copy/update phase and must be done
1525       // before the final locations of objects are determined, to prevent
1526       // leaving a fragment of dead space that is too small to fill.
1527       fill_dense_prefix_end(id);
1528 
1529       // Compute the destination of each Region, and thus each object.
1530       _summary_data.summarize_dense_prefix(space->bottom(), dense_prefix_end);
1531       _summary_data.summarize(_space_info[id].split_info(),
1532                               dense_prefix_end, space->top(), NULL,
1533                               dense_prefix_end, space->end(),
1534                               _space_info[id].new_top_addr());
1535     }
1536   }
1537 
1538   if (develop_log_is_enabled(Trace, gc, compaction, phases)) {
1539     const size_t region_size = ParallelCompactData::RegionSize;
1540     HeapWord* const dense_prefix_end = _space_info[id].dense_prefix();
1541     const size_t dp_region = _summary_data.addr_to_region_idx(dense_prefix_end);
1542     const size_t dp_words = pointer_delta(dense_prefix_end, space->bottom());
1543     HeapWord* const new_top = _space_info[id].new_top();
1544     const HeapWord* nt_aligned_up = _summary_data.region_align_up(new_top);
1545     const size_t cr_words = pointer_delta(nt_aligned_up, dense_prefix_end);
1546     log_develop_trace(gc, compaction, phases)(
1547         "id=%d cap=" SIZE_FORMAT " dp=" PTR_FORMAT " "
1548         "dp_region=" SIZE_FORMAT " " "dp_count=" SIZE_FORMAT " "
1549         "cr_count=" SIZE_FORMAT " " "nt=" PTR_FORMAT,
1550         id, space->capacity_in_words(), p2i(dense_prefix_end),
1551         dp_region, dp_words / region_size,
1552         cr_words / region_size, p2i(new_top));
1553   }
1554 }
1555 
1556 #ifndef PRODUCT
1557 void PSParallelCompact::summary_phase_msg(SpaceId dst_space_id,
1558                                           HeapWord* dst_beg, HeapWord* dst_end,
1559                                           SpaceId src_space_id,
1560                                           HeapWord* src_beg, HeapWord* src_end)
1561 {
1562   log_develop_trace(gc, compaction, phases)(
1563       "Summarizing %d [%s] into %d [%s]:  "
1564       "src=" PTR_FORMAT "-" PTR_FORMAT " "
1565       SIZE_FORMAT "-" SIZE_FORMAT " "
1566       "dst=" PTR_FORMAT "-" PTR_FORMAT " "
1567       SIZE_FORMAT "-" SIZE_FORMAT,
1568       src_space_id, space_names[src_space_id],
1569       dst_space_id, space_names[dst_space_id],
1570       p2i(src_beg), p2i(src_end),
1571       _summary_data.addr_to_region_idx(src_beg),
1572       _summary_data.addr_to_region_idx(src_end),
1573       p2i(dst_beg), p2i(dst_end),
1574       _summary_data.addr_to_region_idx(dst_beg),
1575       _summary_data.addr_to_region_idx(dst_end));
1576 }
1577 #endif  // #ifndef PRODUCT
1578 
1579 void PSParallelCompact::summary_phase(ParCompactionManager* cm,
1580                                       bool maximum_compaction)
1581 {
1582   GCTraceTime(Trace, gc, phases) tm("Summary Phase", &_gc_timer);
1583 
1584 #ifdef  ASSERT
1585   if (TraceParallelOldGCMarkingPhase) {
1586     tty->print_cr("add_obj_count=" SIZE_FORMAT " "
1587                   "add_obj_bytes=" SIZE_FORMAT,
1588                   add_obj_count, add_obj_size * HeapWordSize);
1589     tty->print_cr("mark_bitmap_count=" SIZE_FORMAT " "
1590                   "mark_bitmap_bytes=" SIZE_FORMAT,
1591                   mark_bitmap_count, mark_bitmap_size * HeapWordSize);
1592   }
1593 #endif  // #ifdef ASSERT
1594 
1595   // Quick summarization of each space into itself, to see how much is live.
1596   summarize_spaces_quick();
1597 
1598   log_develop_trace(gc, compaction, phases)("summary phase:  after summarizing each space to self");
1599   NOT_PRODUCT(print_region_ranges());
1600   NOT_PRODUCT(print_initial_summary_data(_summary_data, _space_info));
1601 
1602   // The amount of live data that will end up in old space (assuming it fits).
1603   size_t old_space_total_live = 0;
1604   for (unsigned int id = old_space_id; id < last_space_id; ++id) {
1605     old_space_total_live += pointer_delta(_space_info[id].new_top(),
1606                                           _space_info[id].space()->bottom());
1607   }
1608 
1609   MutableSpace* const old_space = _space_info[old_space_id].space();
1610   const size_t old_capacity = old_space->capacity_in_words();
1611   if (old_space_total_live > old_capacity) {
1612     // XXX - should also try to expand
1613     maximum_compaction = true;
1614   }
1615 
1616   // Old generation.
1617   summarize_space(old_space_id, maximum_compaction);
1618 
1619   // Summarize the remaining spaces in the young gen.  The initial target space
1620   // is the old gen.  If a space does not fit entirely into the target, then the
1621   // remainder is compacted into the space itself and that space becomes the new
1622   // target.
1623   SpaceId dst_space_id = old_space_id;
1624   HeapWord* dst_space_end = old_space->end();
1625   HeapWord** new_top_addr = _space_info[dst_space_id].new_top_addr();
1626   for (unsigned int id = eden_space_id; id < last_space_id; ++id) {
1627     const MutableSpace* space = _space_info[id].space();
1628     const size_t live = pointer_delta(_space_info[id].new_top(),
1629                                       space->bottom());
1630     const size_t available = pointer_delta(dst_space_end, *new_top_addr);
1631 
1632     NOT_PRODUCT(summary_phase_msg(dst_space_id, *new_top_addr, dst_space_end,
1633                                   SpaceId(id), space->bottom(), space->top());)
1634     if (live > 0 && live <= available) {
1635       // All the live data will fit.
1636       bool done = _summary_data.summarize(_space_info[id].split_info(),
1637                                           space->bottom(), space->top(),
1638                                           NULL,
1639                                           *new_top_addr, dst_space_end,
1640                                           new_top_addr);
1641       assert(done, "space must fit into old gen");
1642 
1643       // Reset the new_top value for the space.
1644       _space_info[id].set_new_top(space->bottom());
1645     } else if (live > 0) {
1646       // Attempt to fit part of the source space into the target space.
1647       HeapWord* next_src_addr = NULL;
1648       bool done = _summary_data.summarize(_space_info[id].split_info(),
1649                                           space->bottom(), space->top(),
1650                                           &next_src_addr,
1651                                           *new_top_addr, dst_space_end,
1652                                           new_top_addr);
1653       assert(!done, "space should not fit into old gen");
1654       assert(next_src_addr != NULL, "sanity");
1655 
1656       // The source space becomes the new target, so the remainder is compacted
1657       // within the space itself.
1658       dst_space_id = SpaceId(id);
1659       dst_space_end = space->end();
1660       new_top_addr = _space_info[id].new_top_addr();
1661       NOT_PRODUCT(summary_phase_msg(dst_space_id,
1662                                     space->bottom(), dst_space_end,
1663                                     SpaceId(id), next_src_addr, space->top());)
1664       done = _summary_data.summarize(_space_info[id].split_info(),
1665                                      next_src_addr, space->top(),
1666                                      NULL,
1667                                      space->bottom(), dst_space_end,
1668                                      new_top_addr);
1669       assert(done, "space must fit when compacted into itself");
1670       assert(*new_top_addr <= space->top(), "usage should not grow");
1671     }
1672   }
1673 
1674   log_develop_trace(gc, compaction, phases)("summary phase:  after final summarization");
1675   NOT_PRODUCT(print_region_ranges());
1676   NOT_PRODUCT(print_initial_summary_data(_summary_data, _space_info));
1677 }
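
// The target cascade described above (the old gen is the initial target; a
// space that does not fit entirely becomes the new target for the spaces that
// follow) can be pictured with plain word counts.  A minimal standalone
// sketch with hypothetical sizes, excluded from compilation and ignoring the
// split/deferred handling done by the real summarize() calls:
#if 0  // Illustrative sketch; not part of this translation unit.
#include <cstdio>

int main() {
  long target_free = 300;                       // free words left in the current target
  const long live[]     = { 250, 120, 40 };     // live words in eden, from, to (hypothetical)
  const long capacity[] = { 600, 500, 500 };
  const char* names[]   = { "eden", "from", "to" };

  for (int i = 0; i < 3; i++) {
    if (live[i] <= target_free) {
      target_free -= live[i];                   // the whole space empties into the target
      std::printf("%s: all %ld words fit; target has %ld free left\n",
                  names[i], live[i], target_free);
    } else {
      const long overflow = live[i] - target_free;  // remainder compacts into the space itself
      std::printf("%s: %ld words fit, %ld stay; %s becomes the new target\n",
                  names[i], target_free, overflow, names[i]);
      target_free = capacity[i] - overflow;     // the space is now the target for what follows
    }
  }
  return 0;
}
#endif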
1678 
1679 // This method should contain all heap-specific policy for invoking a full
1680 // collection.  invoke_no_policy() will only attempt to compact the heap; it
1681 // will do nothing further.  If we need to bail out for policy reasons, scavenge
1682 // before the full gc, or apply any other specialized behavior, it needs to be added here.
1683 //
1684 // Note that this method should only be called from the vm_thread while at a
1685 // safepoint.
1686 //
1687 // Note that the all_soft_refs_clear flag in the collector policy
1688 // may be true because this method can be called without intervening
1689 // activity.  For example, when the heap space is tight and full measures
1690 // are being taken to free space.
1691 void PSParallelCompact::invoke(bool maximum_heap_compaction) {
1692   assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
1693   assert(Thread::current() == (Thread*)VMThread::vm_thread(),
1694          "should be in vm thread");
1695 
1696   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
1697   GCCause::Cause gc_cause = heap->gc_cause();
1698   assert(!heap->is_gc_active(), "not reentrant");
1699 
1700   PSAdaptiveSizePolicy* policy = heap->size_policy();
1701   IsGCActiveMark mark;
1702 
1703   if (ScavengeBeforeFullGC) {
1704     PSScavenge::invoke_no_policy();
1705   }
1706 
1707   const bool clear_all_soft_refs =
1708     heap->collector_policy()->should_clear_all_soft_refs();
1709 
1710   PSParallelCompact::invoke_no_policy(clear_all_soft_refs ||
1711                                       maximum_heap_compaction);
1712 }
1713 
1714 // This method contains no policy. You should probably
1715 // be calling invoke() instead.
1716 bool PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) {
1717   assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
1718   assert(ref_processor() != NULL, "Sanity");
1719 
1720   if (GC_locker::check_active_before_gc()) {
1721     return false;
1722   }
1723 
1724   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
1725 
1726   GCIdMark gc_id_mark;
1727   _gc_timer.register_gc_start();
1728   _gc_tracer.report_gc_start(heap->gc_cause(), _gc_timer.gc_start());
1729 
1730   TimeStamp marking_start;
1731   TimeStamp compaction_start;
1732   TimeStamp collection_exit;
1733 
1734   GCCause::Cause gc_cause = heap->gc_cause();
1735   PSYoungGen* young_gen = heap->young_gen();
1736   PSOldGen* old_gen = heap->old_gen();
1737   PSAdaptiveSizePolicy* size_policy = heap->size_policy();
1738 
1739   // The scope of casr should end after code that can change
1740   // CollectorPolicy::_should_clear_all_soft_refs.
1741   ClearedAllSoftRefs casr(maximum_heap_compaction,
1742                           heap->collector_policy());
1743 
1744   if (ZapUnusedHeapArea) {
1745     // Save information needed to minimize mangling
1746     heap->record_gen_tops_before_GC();
1747   }
1748 
1749   heap->pre_full_gc_dump(&_gc_timer);
1750 
1751   // Make sure data structures are sane, make the heap parsable, and do other
1752   // miscellaneous bookkeeping.
1753   pre_compact();
1754 
1755   PreGCValues pre_gc_values(heap);
1756 
1757   // Get the compaction manager reserved for the VM thread.
1758   ParCompactionManager* const vmthread_cm =
1759     ParCompactionManager::manager_array(gc_task_manager()->workers());
1760 
1761   {
1762     ResourceMark rm;
1763     HandleMark hm;
1764 
1765     // Set the number of GC threads to be used in this collection
1766     gc_task_manager()->set_active_gang();
1767     gc_task_manager()->task_idle_workers();
1768 
1769     GCTraceCPUTime tcpu;
1770     GCTraceTime(Info, gc) tm("Pause Full", NULL, gc_cause, true);
1771     TraceCollectorStats tcs(counters());
1772     TraceMemoryManagerStats tms(true /* Full GC */, gc_cause);
1773 
1774     if (TraceOldGenTime) accumulated_time()->start();
1775 
1776     // Let the size policy know we're starting
1777     size_policy->major_collection_begin();
1778 
1779     CodeCache::gc_prologue();
1780 
1781 #if defined(COMPILER2) || INCLUDE_JVMCI
1782     DerivedPointerTable::clear();
1783 #endif
1784 
1785     ref_processor()->enable_discovery();
1786     ref_processor()->setup_policy(maximum_heap_compaction);
1787 
1788     bool marked_for_unloading = false;
1789 
1790     marking_start.update();
1791     marking_phase(vmthread_cm, maximum_heap_compaction, &_gc_tracer);
1792 
1793     bool max_on_system_gc = UseMaximumCompactionOnSystemGC
1794       && GCCause::is_user_requested_gc(gc_cause);
1795     summary_phase(vmthread_cm, maximum_heap_compaction || max_on_system_gc);
1796 
1797 #if defined(COMPILER2) || INCLUDE_JVMCI
1798     assert(DerivedPointerTable::is_active(), "Sanity");
1799     DerivedPointerTable::set_active(false);
1800 #endif
1801 
1802     // adjust_roots() updates Universe::_intArrayKlassObj which is
1803     // needed by the compaction for filling holes in the dense prefix.
1804     adjust_roots(vmthread_cm);
1805 
1806     compaction_start.update();
1807     compact();
1808 
1809     // Reset the mark bitmap, summary data, and do other bookkeeping.  Must be
1810     // done before resizing.
1811     post_compact();
1812 
1813     // Let the size policy know we're done
1814     size_policy->major_collection_end(old_gen->used_in_bytes(), gc_cause);
1815 
1816     if (UseAdaptiveSizePolicy) {
1817       log_debug(gc, ergo)("AdaptiveSizeStart: collection: %d ", heap->total_collections());
1818       log_trace(gc, ergo)("old_gen_capacity: " SIZE_FORMAT " young_gen_capacity: " SIZE_FORMAT,
1819                           old_gen->capacity_in_bytes(), young_gen->capacity_in_bytes());
1820 
1821       // Don't check if the size_policy is ready here.  Let
1822       // the size_policy check that internally.
1823       if (UseAdaptiveGenerationSizePolicyAtMajorCollection &&
1824           AdaptiveSizePolicy::should_update_promo_stats(gc_cause)) {
1825         // Swap the survivor spaces if from_space is empty. The
1826         // resize_young_gen() called below is normally used after
1827         // a successful young GC and swapping of survivor spaces;
1828         // otherwise, it will fail to resize the young gen with
1829         // the current implementation.
1830         if (young_gen->from_space()->is_empty()) {
1831           young_gen->from_space()->clear(SpaceDecorator::Mangle);
1832           young_gen->swap_spaces();
1833         }
1834 
1835         // Calculate optimal free space amounts
1836         assert(young_gen->max_size() >
1837           young_gen->from_space()->capacity_in_bytes() +
1838           young_gen->to_space()->capacity_in_bytes(),
1839           "Sizes of space in young gen are out-of-bounds");
1840 
1841         size_t young_live = young_gen->used_in_bytes();
1842         size_t eden_live = young_gen->eden_space()->used_in_bytes();
1843         size_t old_live = old_gen->used_in_bytes();
1844         size_t cur_eden = young_gen->eden_space()->capacity_in_bytes();
1845         size_t max_old_gen_size = old_gen->max_gen_size();
1846         size_t max_eden_size = young_gen->max_size() -
1847           young_gen->from_space()->capacity_in_bytes() -
1848           young_gen->to_space()->capacity_in_bytes();
1849 
1850         // Used for diagnostics
1851         size_policy->clear_generation_free_space_flags();
1852 
1853         size_policy->compute_generations_free_space(young_live,
1854                                                     eden_live,
1855                                                     old_live,
1856                                                     cur_eden,
1857                                                     max_old_gen_size,
1858                                                     max_eden_size,
1859                                                     true /* full gc*/);
1860 
1861         size_policy->check_gc_overhead_limit(young_live,
1862                                              eden_live,
1863                                              max_old_gen_size,
1864                                              max_eden_size,
1865                                              true /* full gc*/,
1866                                              gc_cause,
1867                                              heap->collector_policy());
1868 
1869         size_policy->decay_supplemental_growth(true /* full gc*/);
1870 
1871         heap->resize_old_gen(
1872           size_policy->calculated_old_free_size_in_bytes());
1873 
1874         heap->resize_young_gen(size_policy->calculated_eden_size_in_bytes(),
1875                                size_policy->calculated_survivor_size_in_bytes());
1876       }
1877 
1878       log_debug(gc, ergo)("AdaptiveSizeStop: collection: %d ", heap->total_collections());
1879     }
1880 
1881     if (UsePerfData) {
1882       PSGCAdaptivePolicyCounters* const counters = heap->gc_policy_counters();
1883       counters->update_counters();
1884       counters->update_old_capacity(old_gen->capacity_in_bytes());
1885       counters->update_young_capacity(young_gen->capacity_in_bytes());
1886     }
1887 
1888     heap->resize_all_tlabs();
1889 
1890     // Resize the metaspace capacity after a collection
1891     MetaspaceGC::compute_new_size();
1892 
1893     if (TraceOldGenTime) {
1894       accumulated_time()->stop();
1895     }
1896 
1897     young_gen->print_used_change(pre_gc_values.young_gen_used());
1898     old_gen->print_used_change(pre_gc_values.old_gen_used());
1899     MetaspaceAux::print_metaspace_change(pre_gc_values.metadata_used());
1900 
1901     // Track memory usage and detect low memory
1902     MemoryService::track_memory_usage();
1903     heap->update_counters();
1904     gc_task_manager()->release_idle_workers();
1905   }
1906 
1907 #ifdef ASSERT
1908   for (size_t i = 0; i < ParallelGCThreads + 1; ++i) {
1909     ParCompactionManager* const cm =
1910       ParCompactionManager::manager_array(int(i));
1911     assert(cm->marking_stack()->is_empty(),       "should be empty");
1912     assert(ParCompactionManager::region_list(int(i))->is_empty(), "should be empty");
1913   }
1914 #endif // ASSERT
1915 
1916   if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
1917     HandleMark hm;  // Discard invalid handles created during verification
1918     Universe::verify("After GC");
1919   }
1920 
1921   // Re-verify object start arrays
1922   if (VerifyObjectStartArray &&
1923       VerifyAfterGC) {
1924     old_gen->verify_object_start_array();
1925   }
1926 
1927   if (ZapUnusedHeapArea) {
1928     old_gen->object_space()->check_mangled_unused_area_complete();
1929   }
1930 
1931   NOT_PRODUCT(ref_processor()->verify_no_references_recorded());
1932 
1933   collection_exit.update();
1934 
1935   heap->print_heap_after_gc();
1936   heap->trace_heap_after_gc(&_gc_tracer);
1937 
1938   log_debug(gc, task, time)("VM-Thread " JLONG_FORMAT " " JLONG_FORMAT " " JLONG_FORMAT,
1939                          marking_start.ticks(), compaction_start.ticks(),
1940                          collection_exit.ticks());
1941   gc_task_manager()->print_task_time_stamps();
1942 
1943   heap->post_full_gc_dump(&_gc_timer);
1944 
1945 #ifdef TRACESPINNING
1946   ParallelTaskTerminator::print_termination_counts();
1947 #endif
1948 
1949   AdaptiveSizePolicyOutput::print(size_policy, heap->total_collections());
1950 
1951   _gc_timer.register_gc_end();
1952 
1953   _gc_tracer.report_dense_prefix(dense_prefix(old_space_id));
1954   _gc_tracer.report_gc_end(_gc_timer.gc_end(), _gc_timer.time_partitions());
1955 
1956   return true;
1957 }
1958 
1959 bool PSParallelCompact::absorb_live_data_from_eden(PSAdaptiveSizePolicy* size_policy,
1960                                              PSYoungGen* young_gen,
1961                                              PSOldGen* old_gen) {
1962   MutableSpace* const eden_space = young_gen->eden_space();
1963   assert(!eden_space->is_empty(), "eden must be non-empty");
1964   assert(young_gen->virtual_space()->alignment() ==
1965          old_gen->virtual_space()->alignment(), "alignments do not match");
1966 
1967   if (!(UseAdaptiveSizePolicy && UseAdaptiveGCBoundary)) {
1968     return false;
1969   }
1970 
1971   // Both generations must be completely committed.
1972   if (young_gen->virtual_space()->uncommitted_size() != 0) {
1973     return false;
1974   }
1975   if (old_gen->virtual_space()->uncommitted_size() != 0) {
1976     return false;
1977   }
1978 
1979   // Figure out how much to take from eden.  Include the average amount promoted
1980   // in the total; otherwise the next young gen GC will simply bail out to a
1981   // full GC.
1982   const size_t alignment = old_gen->virtual_space()->alignment();
1983   const size_t eden_used = eden_space->used_in_bytes();
1984   const size_t promoted = (size_t)size_policy->avg_promoted()->padded_average();
1985   const size_t absorb_size = align_size_up(eden_used + promoted, alignment);
1986   const size_t eden_capacity = eden_space->capacity_in_bytes();
1987 
1988   if (absorb_size >= eden_capacity) {
1989     return false; // Must leave some space in eden.
1990   }
1991 
1992   const size_t new_young_size = young_gen->capacity_in_bytes() - absorb_size;
1993   if (new_young_size < young_gen->min_gen_size()) {
1994     return false; // Respect young gen minimum size.
1995   }
1996 
1997   log_trace(heap, ergo)(" absorbing " SIZE_FORMAT "K:  "
1998                         "eden " SIZE_FORMAT "K->" SIZE_FORMAT "K "
1999                         "from " SIZE_FORMAT "K, to " SIZE_FORMAT "K "
2000                         "young_gen " SIZE_FORMAT "K->" SIZE_FORMAT "K ",
2001                         absorb_size / K,
2002                         eden_capacity / K, (eden_capacity - absorb_size) / K,
2003                         young_gen->from_space()->used_in_bytes() / K,
2004                         young_gen->to_space()->used_in_bytes() / K,
2005                         young_gen->capacity_in_bytes() / K, new_young_size / K);
2006 
2007   // Fill the unused part of the old gen.
2008   MutableSpace* const old_space = old_gen->object_space();
2009   HeapWord* const unused_start = old_space->top();
2010   size_t const unused_words = pointer_delta(old_space->end(), unused_start);
2011 
2012   if (unused_words > 0) {
2013     if (unused_words < CollectedHeap::min_fill_size()) {
2014       return false;  // If the old gen cannot be filled, must give up.
2015     }
2016     CollectedHeap::fill_with_objects(unused_start, unused_words);
2017   }
2018 
2019   // Take the live data from eden and set both top and end in the old gen to
2020   // eden top.  (Need to set end because reset_after_change() mangles the region
2021   // from end to virtual_space->high() in debug builds).
2022   HeapWord* const new_top = eden_space->top();
2023   old_gen->virtual_space()->expand_into(young_gen->virtual_space(),
2024                                         absorb_size);
2025   young_gen->reset_after_change();
2026   old_space->set_top(new_top);
2027   old_space->set_end(new_top);
2028   old_gen->reset_after_change();
2029 
2030   // Update the object start array for the filler object and the data from eden.
2031   ObjectStartArray* const start_array = old_gen->start_array();
2032   for (HeapWord* p = unused_start; p < new_top; p += oop(p)->size()) {
2033     start_array->allocate_block(p);
2034   }
2035 
2036   // Could update the promoted average here, but it is not typically updated at
2037   // full GCs and the value to use is unclear.  Something like
2038   //
2039   // cur_promoted_avg + absorb_size / number_of_scavenges_since_last_full_gc.
2040 
2041   size_policy->set_bytes_absorbed_from_eden(absorb_size);
2042   return true;
2043 }
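
// A quick numeric illustration of the sizing math above.  The sketch is
// excluded from compilation; align_up() is a stand-in for align_size_up() and
// assumes a power-of-two alignment, and all byte counts are hypothetical.
#if 0  // Illustrative sketch; not part of this translation unit.
#include <cstddef>
#include <cstdio>

static size_t align_up(size_t x, size_t alignment) {  // power-of-two alignment assumed
  return (x + alignment - 1) & ~(alignment - 1);
}

int main() {
  const size_t alignment     = 64 * 1024;                // virtual space alignment
  const size_t eden_used     = 10 * 1024 * 1024 + 4321;  // current eden usage
  const size_t promoted      =  2 * 1024 * 1024;         // padded average promoted per young GC
  const size_t eden_capacity = 32 * 1024 * 1024;

  const size_t absorb_size = align_up(eden_used + promoted, alignment);
  std::printf("absorb_size = %zu bytes, eden keeps %zu bytes\n",
              absorb_size, eden_capacity - absorb_size);
  // The absorption only proceeds if absorb_size < eden_capacity and the
  // shrunken young gen stays at or above its minimum size.
  return 0;
}
#endif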
2044 
2045 GCTaskManager* const PSParallelCompact::gc_task_manager() {
2046   assert(ParallelScavengeHeap::gc_task_manager() != NULL,
2047     "shouldn't return NULL");
2048   return ParallelScavengeHeap::gc_task_manager();
2049 }
2050 
2051 void PSParallelCompact::marking_phase(ParCompactionManager* cm,
2052                                       bool maximum_heap_compaction,
2053                                       ParallelOldTracer *gc_tracer) {
2054   // Recursively traverse all live objects and mark them
2055   GCTraceTime(Trace, gc, phases) tm("Marking Phase", &_gc_timer);
2056 
2057   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
2058   uint parallel_gc_threads = heap->gc_task_manager()->workers();
2059   uint active_gc_threads = heap->gc_task_manager()->active_workers();
2060   TaskQueueSetSuper* qset = ParCompactionManager::region_array();
2061   ParallelTaskTerminator terminator(active_gc_threads, qset);
2062 
2063   ParCompactionManager::MarkAndPushClosure mark_and_push_closure(cm);
2064   ParCompactionManager::FollowStackClosure follow_stack_closure(cm);
2065 
2066   // Need new claim bits before marking starts.
2067   ClassLoaderDataGraph::clear_claimed_marks();
2068 
2069   {
2070     GCTraceTime(Trace, gc, phases) tm("Par Mark", &_gc_timer);
2071 
2072     ParallelScavengeHeap::ParStrongRootsScope psrs;
2073 
2074     GCTaskQueue* q = GCTaskQueue::create();
2075 
2076     q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::universe));
2077     q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::jni_handles));
2078     // We scan the thread roots in parallel
2079     Threads::create_thread_roots_marking_tasks(q);
2080     q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::object_synchronizer));
2081     q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::flat_profiler));
2082     q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::management));
2083     q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::system_dictionary));
2084     q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::class_loader_data));
2085     q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::jvmti));
2086     q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::code_cache));
2087 
2088     if (active_gc_threads > 1) {
2089       for (uint j = 0; j < active_gc_threads; j++) {
2090         q->enqueue(new StealMarkingTask(&terminator));
2091       }
2092     }
2093 
2094     gc_task_manager()->execute_and_wait(q);
2095   }
2096 
2097   // Process reference objects found during marking
2098   {
2099     GCTraceTime(Trace, gc, phases) tm("Reference Processing", &_gc_timer);
2100 
2101     ReferenceProcessorStats stats;
2102     if (ref_processor()->processing_is_mt()) {
2103       RefProcTaskExecutor task_executor;
2104       stats = ref_processor()->process_discovered_references(
2105         is_alive_closure(), &mark_and_push_closure, &follow_stack_closure,
2106         &task_executor, &_gc_timer);
2107     } else {
2108       stats = ref_processor()->process_discovered_references(
2109         is_alive_closure(), &mark_and_push_closure, &follow_stack_closure, NULL,
2110         &_gc_timer);
2111     }
2112 
2113     gc_tracer->report_gc_reference_stats(stats);
2114   }
2115 
2116   GCTraceTime(Trace, gc) tm_m("Class Unloading", &_gc_timer);
2117 
2118   // This is the point where the entire marking should have completed.
2119   assert(cm->marking_stacks_empty(), "Marking should have completed");
2120 
2121   // Follow system dictionary roots and unload classes.
2122   bool purged_class = SystemDictionary::do_unloading(is_alive_closure());
2123 
2124   // Unload nmethods.
2125   CodeCache::do_unloading(is_alive_closure(), purged_class);
2126 
2127   // Prune dead klasses from subklass/sibling/implementor lists.
2128   Klass::clean_weak_klass_links(is_alive_closure());
2129 
2130   // Delete entries for dead interned strings.
2131   StringTable::unlink(is_alive_closure());
2132 
2133   // Clean up unreferenced symbols in symbol table.
2134   SymbolTable::unlink();
2135   _gc_tracer.report_object_count_after_gc(is_alive_closure());
2136 }
2137 
2138 // This should be moved to the shared markSweep code!
2139 class PSAlwaysTrueClosure: public BoolObjectClosure {
2140 public:
2141   bool do_object_b(oop p) { return true; }
2142 };
2143 static PSAlwaysTrueClosure always_true;
2144 
2145 void PSParallelCompact::adjust_roots(ParCompactionManager* cm) {
2146   // Adjust the pointers to reflect the new locations
2147   GCTraceTime(Trace, gc, phases) tm("Adjust Roots", &_gc_timer);
2148 
2149   // Need new claim bits when tracing through and adjusting pointers.
2150   ClassLoaderDataGraph::clear_claimed_marks();
2151 
2152   PSParallelCompact::AdjustPointerClosure closure(cm);
2153   PSParallelCompact::AdjustKlassClosure kclosure(cm);
2154 
2155   // General strong roots.
2156   Universe::oops_do(&closure);
2157   JNIHandles::oops_do(&closure);   // Global (strong) JNI handles
2158   CLDToOopClosure adjust_from_cld(&closure);
2159   Threads::oops_do(&closure, &adjust_from_cld, NULL);
2160   ObjectSynchronizer::oops_do(&closure);
2161   FlatProfiler::oops_do(&closure);
2162   Management::oops_do(&closure);
2163   JvmtiExport::oops_do(&closure);
2164   SystemDictionary::oops_do(&closure);
2165   ClassLoaderDataGraph::oops_do(&closure, &kclosure, true);
2166 
2167   // Now adjust pointers in remaining weak roots.  (All of which should
2168   // have been cleared if they pointed to non-surviving objects.)
2169   // Global (weak) JNI handles
2170   JNIHandles::weak_oops_do(&always_true, &closure);
2171 
2172   CodeBlobToOopClosure adjust_from_blobs(&closure, CodeBlobToOopClosure::FixRelocations);
2173   CodeCache::blobs_do(&adjust_from_blobs);
2174   StringTable::oops_do(&closure);
2175   ref_processor()->weak_oops_do(&closure);
2176   // Roots were visited so references into the young gen in roots
2177   // may have been scanned.  Process them also.
2178   // Should the reference processor have a span that excludes
2179   // young gen objects?
2180   PSScavenge::reference_processor()->weak_oops_do(&closure);
2181 }
2182 
2183 // Helper class to print 8 region numbers per line and then print the total at the end.
2184 class FillableRegionLogger : public StackObj {
2185 private:
2186   LogHandle(gc, compaction) log;
2187   static const int LineLength = 8;
2188   size_t _regions[LineLength];
2189   int _next_index;
2190   bool _enabled;
2191   size_t _total_regions;
2192 public:
2193   FillableRegionLogger() : _next_index(0), _total_regions(0), _enabled(develop_log_is_enabled(Trace, gc, compaction)) { }
2194   ~FillableRegionLogger() {
2195     log.trace(SIZE_FORMAT " initially fillable regions", _total_regions);
2196   }
2197 
2198   void print_line() {
2199     if (!_enabled || _next_index == 0) {
2200       return;
2201     }
2202     FormatBuffer<> line("Fillable: ");
2203     for (int i = 0; i < _next_index; i++) {
2204       line.append(" " SIZE_FORMAT_W(7), _regions[i]);
2205     }
2206     log.trace("%s", line.buffer());
2207     _next_index = 0;
2208   }
2209 
2210   void handle(size_t region) {
2211     if (!_enabled) {
2212       return;
2213     }
2214     _regions[_next_index++] = region;
2215     if (_next_index == LineLength) {
2216       print_line();
2217     }
2218     _total_regions++;
2219   }
2220 };
2221 
2222 void PSParallelCompact::enqueue_region_draining_tasks(GCTaskQueue* q,
2223                                                       uint parallel_gc_threads)
2224 {
2225   GCTraceTime(Trace, gc, phases) tm("Drain Task Setup", &_gc_timer);
2226 
2227   // Find the threads that are active
2228   unsigned int which = 0;
2229 
2230   const uint task_count = MAX2(parallel_gc_threads, 1U);
2231   for (uint j = 0; j < task_count; j++) {
2232     q->enqueue(new DrainStacksCompactionTask(j));
2233     ParCompactionManager::verify_region_list_empty(j);
2234     // Set the region stack variables to the "no region stack" values
2235     // so that the stealing tasks will recognize managers as needing a
2236     // region stack if they do not get one by executing a draining
2237     // task.
2238     ParCompactionManager* cm = ParCompactionManager::manager_array(j);
2239     cm->set_region_stack(NULL);
2240     cm->set_region_stack_index((uint)max_uintx);
2241   }
2242   ParCompactionManager::reset_recycled_stack_index();
2243 
2244   // Find all regions that are available (can be filled immediately) and
2245   // distribute them to the thread stacks.  The iteration is done in reverse
2246   // order (high to low) so the regions will be removed in ascending order.
2247 
2248   const ParallelCompactData& sd = PSParallelCompact::summary_data();
2249 
2250   // "which" is an index into the tasks created above; it must satisfy
2251   // 0 <= which < task_count.
2252 
2253   which = 0;
2254   // id + 1 is used to test termination, so an unsigned type can
2255   // be used even though old_space_id == 0.
2256   FillableRegionLogger region_logger;
2257   for (unsigned int id = to_space_id; id + 1 > old_space_id; --id) {
2258     SpaceInfo* const space_info = _space_info + id;
2259     MutableSpace* const space = space_info->space();
2260     HeapWord* const new_top = space_info->new_top();
2261 
2262     const size_t beg_region = sd.addr_to_region_idx(space_info->dense_prefix());
2263     const size_t end_region =
2264       sd.addr_to_region_idx(sd.region_align_up(new_top));
2265 
2266     for (size_t cur = end_region - 1; cur + 1 > beg_region; --cur) {
2267       if (sd.region(cur)->claim_unsafe()) {
2268         ParCompactionManager::region_list_push(which, cur);
2269         region_logger.handle(cur);
2270         // Assign regions to tasks in round-robin fashion.
2271         if (++which == task_count) {
2272           assert(which <= parallel_gc_threads,
2273             "Inconsistent number of workers");
2274           which = 0;
2275         }
2276       }
2277     }
2278     region_logger.print_line();
2279   }
2280 }
2281 
2282 #define PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING 4
2283 
2284 void PSParallelCompact::enqueue_dense_prefix_tasks(GCTaskQueue* q,
2285                                                     uint parallel_gc_threads) {
2286   GCTraceTime(Trace, gc, phases) tm("Dense Prefix Task Setup", &_gc_timer);
2287 
2288   ParallelCompactData& sd = PSParallelCompact::summary_data();
2289 
2290   // Iterate over all the spaces adding tasks for updating
2291   // regions in the dense prefix.  Assume that 1 gc thread
2292   // will work on opening the gaps and the remaining gc threads
2293   // will work on the dense prefix.
2294   unsigned int space_id;
2295   for (space_id = old_space_id; space_id < last_space_id; ++space_id) {
2296     HeapWord* const dense_prefix_end = _space_info[space_id].dense_prefix();
2297     const MutableSpace* const space = _space_info[space_id].space();
2298 
2299     if (dense_prefix_end == space->bottom()) {
2300       // There is no dense prefix for this space.
2301       continue;
2302     }
2303 
2304     // The dense prefix is before this region.
2305     size_t region_index_end_dense_prefix =
2306         sd.addr_to_region_idx(dense_prefix_end);
2307     RegionData* const dense_prefix_cp =
2308       sd.region(region_index_end_dense_prefix);
2309     assert(dense_prefix_end == space->end() ||
2310            dense_prefix_cp->available() ||
2311            dense_prefix_cp->claimed(),
2312            "The region after the dense prefix should always be ready to fill");
2313 
2314     size_t region_index_start = sd.addr_to_region_idx(space->bottom());
2315 
2316     // Is there dense prefix work?
2317     size_t total_dense_prefix_regions =
2318       region_index_end_dense_prefix - region_index_start;
2319     // How many regions of the dense prefix should be given to
2320     // each thread?
2321     if (total_dense_prefix_regions > 0) {
2322       uint tasks_for_dense_prefix = 1;
2323       if (total_dense_prefix_regions <=
2324           (parallel_gc_threads * PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING)) {
2325         // Don't over partition.  This assumes that
2326         // PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING is a small integer value
2327         // so there are not many regions to process.
2328         tasks_for_dense_prefix = parallel_gc_threads;
2329       } else {
2330         // Over partition
2331         tasks_for_dense_prefix = parallel_gc_threads *
2332           PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING;
2333       }
2334       size_t regions_per_thread = total_dense_prefix_regions /
2335         tasks_for_dense_prefix;
2336       // Give each thread at least 1 region.
2337       if (regions_per_thread == 0) {
2338         regions_per_thread = 1;
2339       }
2340 
2341       for (uint k = 0; k < tasks_for_dense_prefix; k++) {
2342         if (region_index_start >= region_index_end_dense_prefix) {
2343           break;
2344         }
2345         // region_index_end is not processed
2346         size_t region_index_end = MIN2(region_index_start + regions_per_thread,
2347                                        region_index_end_dense_prefix);
2348         q->enqueue(new UpdateDensePrefixTask(SpaceId(space_id),
2349                                              region_index_start,
2350                                              region_index_end));
2351         region_index_start = region_index_end;
2352       }
2353     }
2354     // This gets any part of the dense prefix that did not
2355     // fit evenly.
2356     if (region_index_start < region_index_end_dense_prefix) {
2357       q->enqueue(new UpdateDensePrefixTask(SpaceId(space_id),
2358                                            region_index_start,
2359                                            region_index_end_dense_prefix));
2360     }
2361   }
2362 }
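
// To make the partitioning above concrete: with 4 GC threads and 100 dense
// prefix regions, the over-partitioning rule yields 16 tasks of 6 regions
// each plus a tail task for the 4 regions left over.  A standalone sketch,
// excluded from compilation, with hypothetical counts (OVER_PARTITIONING
// mirrors PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING):
#if 0  // Illustrative sketch; not part of this translation unit.
#include <cstddef>
#include <cstdio>

#define OVER_PARTITIONING 4

int main() {
  const unsigned threads = 4;
  size_t start = 0, end_dense_prefix = 100;    // hypothetical region index range

  const size_t total = end_dense_prefix - start;
  const unsigned tasks = (total <= threads * OVER_PARTITIONING)
      ? threads                                // few regions: one task per thread
      : threads * OVER_PARTITIONING;           // many regions: over-partition
  size_t per_task = total / tasks;
  if (per_task == 0) per_task = 1;

  for (unsigned k = 0; k < tasks && start < end_dense_prefix; k++) {
    size_t end = start + per_task;
    if (end > end_dense_prefix) end = end_dense_prefix;
    std::printf("task %2u: regions [%zu, %zu)\n", k, start, end);
    start = end;
  }
  if (start < end_dense_prefix) {              // leftover that did not divide evenly
    std::printf("tail   : regions [%zu, %zu)\n", start, end_dense_prefix);
  }
  return 0;
}
#endif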
2363 
2364 void PSParallelCompact::enqueue_region_stealing_tasks(
2365                                      GCTaskQueue* q,
2366                                      ParallelTaskTerminator* terminator_ptr,
2367                                      uint parallel_gc_threads) {
2368   GCTraceTime(Trace, gc, phases) tm("Steal Task Setup", &_gc_timer);
2369 
2370   // Once a thread has drained its stack, it should try to steal regions from
2371   // other threads.
2372   if (parallel_gc_threads > 1) {
2373     for (uint j = 0; j < parallel_gc_threads; j++) {
2374       q->enqueue(new StealRegionCompactionTask(terminator_ptr));
2375     }
2376   }
2377 }
2378 
2379 #ifdef ASSERT
2380 // Write a histogram of the number of times the block table was filled for a
2381 // region.
2382 void PSParallelCompact::write_block_fill_histogram()
2383 {
2384   if (!develop_log_is_enabled(Trace, gc, compaction)) {
2385     return;
2386   }
2387 
2388   LogHandle(gc, compaction) log;
2389   ResourceMark rm;
2390   outputStream* out = log.trace_stream();
2391 
2392   typedef ParallelCompactData::RegionData rd_t;
2393   ParallelCompactData& sd = summary_data();
2394 
2395   for (unsigned int id = old_space_id; id < last_space_id; ++id) {
2396     MutableSpace* const spc = _space_info[id].space();
2397     if (spc->bottom() != spc->top()) {
2398       const rd_t* const beg = sd.addr_to_region_ptr(spc->bottom());
2399       HeapWord* const top_aligned_up = sd.region_align_up(spc->top());
2400       const rd_t* const end = sd.addr_to_region_ptr(top_aligned_up);
2401 
2402       size_t histo[5] = { 0, 0, 0, 0, 0 };
2403       const size_t histo_len = sizeof(histo) / sizeof(size_t);
2404       const size_t region_cnt = pointer_delta(end, beg, sizeof(rd_t));
2405 
2406       for (const rd_t* cur = beg; cur < end; ++cur) {
2407         ++histo[MIN2(cur->blocks_filled_count(), histo_len - 1)];
2408       }
2409       out->print("Block fill histogram: %u %-4s" SIZE_FORMAT_W(5), id, space_names[id], region_cnt);
2410       for (size_t i = 0; i < histo_len; ++i) {
2411         out->print(" " SIZE_FORMAT_W(5) " %5.1f%%",
2412                    histo[i], 100.0 * histo[i] / region_cnt);
2413       }
2414       out->cr();
2415     }
2416   }
2417 }
2418 #endif // #ifdef ASSERT
2419 
2420 void PSParallelCompact::compact() {
2421   GCTraceTime(Trace, gc, phases) tm("Compaction Phase", &_gc_timer);
2422 
2423   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
2424   PSOldGen* old_gen = heap->old_gen();
2425   old_gen->start_array()->reset();
2426   uint parallel_gc_threads = heap->gc_task_manager()->workers();
2427   uint active_gc_threads = heap->gc_task_manager()->active_workers();
2428   TaskQueueSetSuper* qset = ParCompactionManager::region_array();
2429   ParallelTaskTerminator terminator(active_gc_threads, qset);
2430 
2431   GCTaskQueue* q = GCTaskQueue::create();
2432   enqueue_region_draining_tasks(q, active_gc_threads);
2433   enqueue_dense_prefix_tasks(q, active_gc_threads);
2434   enqueue_region_stealing_tasks(q, &terminator, active_gc_threads);
2435 
2436   {
2437     GCTraceTime(Trace, gc, phases) tm("Par Compact", &_gc_timer);
2438 
2439     gc_task_manager()->execute_and_wait(q);
2440 
2441 #ifdef  ASSERT
2442     // Verify that all regions have been processed before the deferred updates.
2443     for (unsigned int id = old_space_id; id < last_space_id; ++id) {
2444       verify_complete(SpaceId(id));
2445     }
2446 #endif
2447   }
2448 
2449   {
2450     // Update the deferred objects, if any.  Any compaction manager can be used.
2451     GCTraceTime(Trace, gc, phases) tm("Deferred Updates", &_gc_timer);
2452     ParCompactionManager* cm = ParCompactionManager::manager_array(0);
2453     for (unsigned int id = old_space_id; id < last_space_id; ++id) {
2454       update_deferred_objects(cm, SpaceId(id));
2455     }
2456   }
2457 
2458   DEBUG_ONLY(write_block_fill_histogram());
2459 }
2460 
2461 #ifdef  ASSERT
2462 void PSParallelCompact::verify_complete(SpaceId space_id) {
2463   // All Regions between space bottom() and new_top() should be marked as filled
2464   // and all Regions between new_top() and top() should be available (i.e.,
2465   // should have been emptied).
2466   ParallelCompactData& sd = summary_data();
2467   SpaceInfo si = _space_info[space_id];
2468   HeapWord* new_top_addr = sd.region_align_up(si.new_top());
2469   HeapWord* old_top_addr = sd.region_align_up(si.space()->top());
2470   const size_t beg_region = sd.addr_to_region_idx(si.space()->bottom());
2471   const size_t new_top_region = sd.addr_to_region_idx(new_top_addr);
2472   const size_t old_top_region = sd.addr_to_region_idx(old_top_addr);
2473 
2474   bool issued_a_warning = false;
2475 
2476   size_t cur_region;
2477   for (cur_region = beg_region; cur_region < new_top_region; ++cur_region) {
2478     const RegionData* const c = sd.region(cur_region);
2479     if (!c->completed()) {
2480       warning("region " SIZE_FORMAT " not filled:  "
2481               "destination_count=%u",
2482               cur_region, c->destination_count());
2483       issued_a_warning = true;
2484     }
2485   }
2486 
2487   for (cur_region = new_top_region; cur_region < old_top_region; ++cur_region) {
2488     const RegionData* const c = sd.region(cur_region);
2489     if (!c->available()) {
2490       warning("region " SIZE_FORMAT " not empty:   "
2491               "destination_count=%u",
2492               cur_region, c->destination_count());
2493       issued_a_warning = true;
2494     }
2495   }
2496 
2497   if (issued_a_warning) {
2498     print_region_ranges();
2499   }
2500 }
2501 #endif  // #ifdef ASSERT
2502 
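     // Update a single live object in place: record its start in the start array
     // and adjust its interior oops to their new locations.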
2503 inline void UpdateOnlyClosure::do_addr(HeapWord* addr) {
2504   _start_array->allocate_block(addr);
2505   compaction_manager()->update_contents(oop(addr));
2506 }
2507 
2508 // Update interior oops in the range of regions [beg_region, end_region).
2509 void
2510 PSParallelCompact::update_and_deadwood_in_dense_prefix(ParCompactionManager* cm,
2511                                                        SpaceId space_id,
2512                                                        size_t beg_region,
2513                                                        size_t end_region) {
2514   ParallelCompactData& sd = summary_data();
2515   ParMarkBitMap* const mbm = mark_bitmap();
2516 
2517   HeapWord* beg_addr = sd.region_to_addr(beg_region);
2518   HeapWord* const end_addr = sd.region_to_addr(end_region);
2519   assert(beg_region <= end_region, "bad region range");
2520   assert(end_addr <= dense_prefix(space_id), "not in the dense prefix");
2521 
2522 #ifdef  ASSERT
2523   // Claim the regions to avoid triggering an assert when they are marked as
2524   // filled.
2525   for (size_t claim_region = beg_region; claim_region < end_region; ++claim_region) {
2526     assert(sd.region(claim_region)->claim_unsafe(), "claim() failed");
2527   }
2528 #endif  // #ifdef ASSERT
2529 
2530   if (beg_addr != space(space_id)->bottom()) {
2531     // Find the first live object or block of dead space that *starts* in this
2532     // range of regions.  If a partial object crosses onto the region, skip it;
2533     // it will be marked for 'deferred update' when the object head is
2534     // processed.  If dead space crosses onto the region, it is also skipped; it
2535     // will be filled when the prior region is processed.  If neither of those
2536     // apply, the first word in the region is the start of a live object or dead
2537     // space.
2538     assert(beg_addr > space(space_id)->bottom(), "sanity");
2539     const RegionData* const cp = sd.region(beg_region);
2540     if (cp->partial_obj_size() != 0) {
2541       beg_addr = sd.partial_obj_end(beg_region);
2542     } else if (dead_space_crosses_boundary(cp, mbm->addr_to_bit(beg_addr))) {
2543       beg_addr = mbm->find_obj_beg(beg_addr, end_addr);
2544     }
2545   }
2546 
2547   if (beg_addr < end_addr) {
2548     // A live object or block of dead space starts in this range of Regions.
2549      HeapWord* const dense_prefix_end = dense_prefix(space_id);
2550 
2551     // Create closures and iterate.
2552     UpdateOnlyClosure update_closure(mbm, cm, space_id);
2553     FillClosure fill_closure(cm, space_id);
2554     ParMarkBitMap::IterationStatus status;
2555     status = mbm->iterate(&update_closure, &fill_closure, beg_addr, end_addr,
2556                           dense_prefix_end);
2557     if (status == ParMarkBitMap::incomplete) {
2558       update_closure.do_addr(update_closure.source());
2559     }
2560   }
2561 
2562   // Mark the regions as filled.
2563   RegionData* const beg_cp = sd.region(beg_region);
2564   RegionData* const end_cp = sd.region(end_region);
2565   for (RegionData* cp = beg_cp; cp < end_cp; ++cp) {
2566     cp->set_completed();
2567   }
2568 }
2569 
2570 // Return the SpaceId for the space containing addr.  If addr is not in the
2571 // heap, last_space_id is returned.  In debug mode the address is expected to
2572 // be in the heap, and this is checked with an assert.
2573 PSParallelCompact::SpaceId PSParallelCompact::space_id(HeapWord* addr) {
2574   assert(ParallelScavengeHeap::heap()->is_in_reserved(addr), "addr not in the heap");
2575 
2576   for (unsigned int id = old_space_id; id < last_space_id; ++id) {
2577     if (_space_info[id].space()->contains(addr)) {
2578       return SpaceId(id);
2579     }
2580   }
2581 
2582   assert(false, "no space contains the addr");
2583   return last_space_id;
2584 }
2585 
2586 void PSParallelCompact::update_deferred_objects(ParCompactionManager* cm,
2587                                                 SpaceId id) {
2588   assert(id < last_space_id, "bad space id");
2589 
2590   ParallelCompactData& sd = summary_data();
2591   const SpaceInfo* const space_info = _space_info + id;
2592   ObjectStartArray* const start_array = space_info->start_array();
2593 
2594   const MutableSpace* const space = space_info->space();
2595   assert(space_info->dense_prefix() >= space->bottom(), "dense_prefix not set");
2596   HeapWord* const beg_addr = space_info->dense_prefix();
2597   HeapWord* const end_addr = sd.region_align_up(space_info->new_top());
2598 
2599   const RegionData* const beg_region = sd.addr_to_region_ptr(beg_addr);
2600   const RegionData* const end_region = sd.addr_to_region_ptr(end_addr);
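       // Walk the regions between the dense prefix and new_top(); any region
       // with a recorded deferred object address holds an object whose interior
       // oop updates were deferred during filling, so update that object now.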
2601   const RegionData* cur_region;
2602   for (cur_region = beg_region; cur_region < end_region; ++cur_region) {
2603     HeapWord* const addr = cur_region->deferred_obj_addr();
2604     if (addr != NULL) {
2605       if (start_array != NULL) {
2606         start_array->allocate_block(addr);
2607       }
2608       cm->update_contents(oop(addr));
2609       assert(oop(addr)->is_oop_or_null(), "Expected an oop or NULL at " PTR_FORMAT, p2i(oop(addr)));
2610     }
2611   }
2612 }
2613 
2614 // Skip over count live words starting from beg, and return the address of the
2615 // next live word.  Unless marked, the word corresponding to beg is assumed to
2616 // be dead.  Callers must either ensure beg does not correspond to the middle of
2617 // an object, or account for those live words in some other way.  Callers must
2618 // also ensure that there are enough live words in the range [beg, end) to skip.
2619 HeapWord*
2620 PSParallelCompact::skip_live_words(HeapWord* beg, HeapWord* end, size_t count)
2621 {
2622   assert(count > 0, "sanity");
2623 
2624   ParMarkBitMap* m = mark_bitmap();
2625   idx_t bits_to_skip = m->words_to_bits(count);
2626   idx_t cur_beg = m->addr_to_bit(beg);
2627   const idx_t search_end = BitMap::word_align_up(m->addr_to_bit(end));
2628 
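       // Walk the live objects one at a time, subtracting each object's size
       // from the number of bits still to be skipped.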
2629   do {
2630     cur_beg = m->find_obj_beg(cur_beg, search_end);
2631     idx_t cur_end = m->find_obj_end(cur_beg, search_end);
2632     const size_t obj_bits = cur_end - cur_beg + 1;
2633     if (obj_bits > bits_to_skip) {
2634       return m->bit_to_addr(cur_beg + bits_to_skip);
2635     }
2636     bits_to_skip -= obj_bits;
2637     cur_beg = cur_end + 1;
2638   } while (bits_to_skip > 0);
2639 
2640   // Skipping the desired number of words landed just past the end of an object.
2641   // Find the start of the next object.
2642   cur_beg = m->find_obj_beg(cur_beg, search_end);
2643   assert(cur_beg < m->addr_to_bit(end), "not enough live words to skip");
2644   return m->bit_to_addr(cur_beg);
2645 }
2646 
2647 HeapWord* PSParallelCompact::first_src_addr(HeapWord* const dest_addr,
2648                                             SpaceId src_space_id,
2649                                             size_t src_region_idx)
2650 {
2651   assert(summary_data().is_region_aligned(dest_addr), "not aligned");
2652 
2653   const SplitInfo& split_info = _space_info[src_space_id].split_info();
2654   if (split_info.dest_region_addr() == dest_addr) {
2655     // The partial object ending at the split point contains the first word to
2656     // be copied to dest_addr.
2657     return split_info.first_src_addr();
2658   }
2659 
2660   const ParallelCompactData& sd = summary_data();
2661   ParMarkBitMap* const bitmap = mark_bitmap();
2662   const size_t RegionSize = ParallelCompactData::RegionSize;
2663 
2664   assert(sd.is_region_aligned(dest_addr), "not aligned");
2665   const RegionData* const src_region_ptr = sd.region(src_region_idx);
2666   const size_t partial_obj_size = src_region_ptr->partial_obj_size();
2667   HeapWord* const src_region_destination = src_region_ptr->destination();
2668 
2669   assert(dest_addr >= src_region_destination, "wrong src region");
2670   assert(src_region_ptr->data_size() > 0, "src region cannot be empty");
2671 
2672   HeapWord* const src_region_beg = sd.region_to_addr(src_region_idx);
2673   HeapWord* const src_region_end = src_region_beg + RegionSize;
2674 
2675   HeapWord* addr = src_region_beg;
2676   if (dest_addr == src_region_destination) {
2677     // Return the first live word in the source region.
2678     if (partial_obj_size == 0) {
2679       addr = bitmap->find_obj_beg(addr, src_region_end);
2680       assert(addr < src_region_end, "no objects start in src region");
2681     }
2682     return addr;
2683   }
2684 
2685   // Must skip some live data.
2686   size_t words_to_skip = dest_addr - src_region_destination;
2687   assert(src_region_ptr->data_size() > words_to_skip, "wrong src region");
2688 
2689   if (partial_obj_size >= words_to_skip) {
2690     // All the live words to skip are part of the partial object.
2691     addr += words_to_skip;
2692     if (partial_obj_size == words_to_skip) {
2693       // Find the first live word past the partial object.
2694       addr = bitmap->find_obj_beg(addr, src_region_end);
2695       assert(addr < src_region_end, "wrong src region");
2696     }
2697     return addr;
2698   }
2699 
2700   // Skip over the partial object (if any).
2701   if (partial_obj_size != 0) {
2702     words_to_skip -= partial_obj_size;
2703     addr += partial_obj_size;
2704   }
2705 
2706   // Skip over live words due to objects that start in the region.
2707   addr = skip_live_words(addr, src_region_end, words_to_skip);
2708   assert(addr < src_region_end, "wrong src region");
2709   return addr;
2710 }
2711 
2712 void PSParallelCompact::decrement_destination_counts(ParCompactionManager* cm,
2713                                                      SpaceId src_space_id,
2714                                                      size_t beg_region,
2715                                                      HeapWord* end_addr)
2716 {
2717   ParallelCompactData& sd = summary_data();
2718 
2719 #ifdef ASSERT
2720   MutableSpace* const src_space = _space_info[src_space_id].space();
2721   HeapWord* const beg_addr = sd.region_to_addr(beg_region);
2722   assert(src_space->contains(beg_addr) || beg_addr == src_space->end(),
2723          "src_space_id does not match beg_addr");
2724   assert(src_space->contains(end_addr) || end_addr == src_space->end(),
2725          "src_space_id does not match end_addr");
2726 #endif // #ifdef ASSERT
2727 
2728   RegionData* const beg = sd.region(beg_region);
2729   RegionData* const end = sd.addr_to_region_ptr(sd.region_align_up(end_addr));
2730 
2731   // Regions up to new_top() are enqueued if they become available.
2732   HeapWord* const new_top = _space_info[src_space_id].new_top();
2733   RegionData* const enqueue_end =
2734     sd.addr_to_region_ptr(sd.region_align_up(new_top));
2735 
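       // Decrement each source region's destination count.  Once the count
       // reaches zero all of the region's live data has been copied out, so if
       // the region lies below new_top() and can be claimed it is pushed onto
       // this worker's stack to be filled in turn.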
2736   for (RegionData* cur = beg; cur < end; ++cur) {
2737     assert(cur->data_size() > 0, "region must have live data");
2738     cur->decrement_destination_count();
2739     if (cur < enqueue_end && cur->available() && cur->claim()) {
2740       cm->push_region(sd.region(cur));
2741     }
2742   }
2743 }
2744 
2745 size_t PSParallelCompact::next_src_region(MoveAndUpdateClosure& closure,
2746                                           SpaceId& src_space_id,
2747                                           HeapWord*& src_space_top,
2748                                           HeapWord* end_addr)
2749 {
2750   typedef ParallelCompactData::RegionData RegionData;
2751 
2752   ParallelCompactData& sd = PSParallelCompact::summary_data();
2753   const size_t region_size = ParallelCompactData::RegionSize;
2754 
2755   size_t src_region_idx = 0;
2756 
2757   // Skip empty regions (if any) up to the top of the space.
2758   HeapWord* const src_aligned_up = sd.region_align_up(end_addr);
2759   RegionData* src_region_ptr = sd.addr_to_region_ptr(src_aligned_up);
2760   HeapWord* const top_aligned_up = sd.region_align_up(src_space_top);
2761   const RegionData* const top_region_ptr =
2762     sd.addr_to_region_ptr(top_aligned_up);
2763   while (src_region_ptr < top_region_ptr && src_region_ptr->data_size() == 0) {
2764     ++src_region_ptr;
2765   }
2766 
2767   if (src_region_ptr < top_region_ptr) {
2768     // The next source region is in the current space.  Update src_region_idx
2769     // and the source address to match src_region_ptr.
2770     src_region_idx = sd.region(src_region_ptr);
2771     HeapWord* const src_region_addr = sd.region_to_addr(src_region_idx);
2772     if (src_region_addr > closure.source()) {
2773       closure.set_source(src_region_addr);
2774     }
2775     return src_region_idx;
2776   }
2777 
2778   // Switch to a new source space and find the first non-empty region.
2779   unsigned int space_id = src_space_id + 1;
2780   assert(space_id < last_space_id, "not enough spaces");
2781 
2782   HeapWord* const destination = closure.destination();
2783 
2784   do {
2785     MutableSpace* space = _space_info[space_id].space();
2786     HeapWord* const bottom = space->bottom();
2787     const RegionData* const bottom_cp = sd.addr_to_region_ptr(bottom);
2788 
2789     // Only consider spaces that do not compact into themselves.
2790     if (bottom_cp->destination() != bottom) {
2791       HeapWord* const top_aligned_up = sd.region_align_up(space->top());
2792       const RegionData* const top_cp = sd.addr_to_region_ptr(top_aligned_up);
2793 
2794       for (const RegionData* src_cp = bottom_cp; src_cp < top_cp; ++src_cp) {
2795         if (src_cp->live_obj_size() > 0) {
2796           // Found it.
2797           assert(src_cp->destination() == destination,
2798                  "first live obj in the space must match the destination");
2799           assert(src_cp->partial_obj_size() == 0,
2800                  "a space cannot begin with a partial obj");
2801 
2802           src_space_id = SpaceId(space_id);
2803           src_space_top = space->top();
2804           const size_t src_region_idx = sd.region(src_cp);
2805           closure.set_source(sd.region_to_addr(src_region_idx));
2806           return src_region_idx;
2807         } else {
2808           assert(src_cp->data_size() == 0, "sanity");
2809         }
2810       }
2811     }
2812   } while (++space_id < last_space_id);
2813 
2814   assert(false, "no source region was found");
2815   return 0;
2816 }
2817 
2818 void PSParallelCompact::fill_region(ParCompactionManager* cm, size_t region_idx)
2819 {
2820   typedef ParMarkBitMap::IterationStatus IterationStatus;
2821   const size_t RegionSize = ParallelCompactData::RegionSize;
2822   ParMarkBitMap* const bitmap = mark_bitmap();
2823   ParallelCompactData& sd = summary_data();
2824   RegionData* const region_ptr = sd.region(region_idx);
2825 
2826   // Get the items needed to construct the closure.
2827   HeapWord* dest_addr = sd.region_to_addr(region_idx);
2828   SpaceId dest_space_id = space_id(dest_addr);
2829   ObjectStartArray* start_array = _space_info[dest_space_id].start_array();
2830   HeapWord* new_top = _space_info[dest_space_id].new_top();
2831   assert(dest_addr < new_top, "sanity");
2832   const size_t words = MIN2(pointer_delta(new_top, dest_addr), RegionSize);
2833 
2834   // Get the source region and related info.
2835   size_t src_region_idx = region_ptr->source_region();
2836   SpaceId src_space_id = space_id(sd.region_to_addr(src_region_idx));
2837   HeapWord* src_space_top = _space_info[src_space_id].space()->top();
2838 
2839   MoveAndUpdateClosure closure(bitmap, cm, start_array, dest_addr, words);
2840   closure.set_source(first_src_addr(dest_addr, src_space_id, src_region_idx));
2841 
2842   // Adjust src_region_idx to prepare for decrementing destination counts (the
2843   // destination count is not decremented when a region is copied to itself).
2844   if (src_region_idx == region_idx) {
2845     src_region_idx += 1;
2846   }
2847 
2848   if (bitmap->is_unmarked(closure.source())) {
2849     // The first source word is in the middle of an object; copy the remainder
2850     // of the object or as much as will fit.  The fact that pointer updates were
2851     // deferred will be noted when the object header is processed.
2852     HeapWord* const old_src_addr = closure.source();
2853     closure.copy_partial_obj();
2854     if (closure.is_full()) {
2855       decrement_destination_counts(cm, src_space_id, src_region_idx,
2856                                    closure.source());
2857       region_ptr->set_deferred_obj_addr(NULL);
2858       region_ptr->set_completed();
2859       return;
2860     }
2861 
2862     HeapWord* const end_addr = sd.region_align_down(closure.source());
2863     if (sd.region_align_down(old_src_addr) != end_addr) {
2864       // The partial object was copied from more than one source region.
2865       decrement_destination_counts(cm, src_space_id, src_region_idx, end_addr);
2866 
2867       // Move to the next source region, possibly switching spaces as well.  All
2868       // args except end_addr may be modified.
2869       src_region_idx = next_src_region(closure, src_space_id, src_space_top,
2870                                        end_addr);
2871     }
2872   }
2873 
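       // Copy live objects from successive source regions until the destination
       // region is full.  As each source region is exhausted,
       // decrement_destination_counts() is called for the regions consumed and
       // the closure advances to the next source region, possibly in another
       // space.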
2874   do {
2875     HeapWord* const cur_addr = closure.source();
2876     HeapWord* const end_addr = MIN2(sd.region_align_up(cur_addr + 1),
2877                                     src_space_top);
2878     IterationStatus status = bitmap->iterate(&closure, cur_addr, end_addr);
2879 
2880     if (status == ParMarkBitMap::incomplete) {
2881       // The last obj that starts in the source region does not end in the
2882       // region.
2883       assert(closure.source() < end_addr, "sanity");
2884       HeapWord* const obj_beg = closure.source();
2885       HeapWord* const range_end = MIN2(obj_beg + closure.words_remaining(),
2886                                        src_space_top);
2887       HeapWord* const obj_end = bitmap->find_obj_end(obj_beg, range_end);
2888       if (obj_end < range_end) {
2889         // The end was found; the entire object will fit.
2890         status = closure.do_addr(obj_beg, bitmap->obj_size(obj_beg, obj_end));
2891         assert(status != ParMarkBitMap::would_overflow, "sanity");
2892       } else {
2893         // The end was not found; the object will not fit.
2894         assert(range_end < src_space_top, "obj cannot cross space boundary");
2895         status = ParMarkBitMap::would_overflow;
2896       }
2897     }
2898 
2899     if (status == ParMarkBitMap::would_overflow) {
2900       // The last object did not fit.  Note that interior oop updates were
2901       // deferred, then copy enough of the object to fill the region.
2902       region_ptr->set_deferred_obj_addr(closure.destination());
2903       status = closure.copy_until_full(); // copies from closure.source()
2904 
2905       decrement_destination_counts(cm, src_space_id, src_region_idx,
2906                                    closure.source());
2907       region_ptr->set_completed();
2908       return;
2909     }
2910 
2911     if (status == ParMarkBitMap::full) {
2912       decrement_destination_counts(cm, src_space_id, src_region_idx,
2913                                    closure.source());
2914       region_ptr->set_deferred_obj_addr(NULL);
2915       region_ptr->set_completed();
2916       return;
2917     }
2918 
2919     decrement_destination_counts(cm, src_space_id, src_region_idx, end_addr);
2920 
2921     // Move to the next source region, possibly switching spaces as well.  All
2922     // args except end_addr may be modified.
2923     src_region_idx = next_src_region(closure, src_space_id, src_space_top,
2924                                      end_addr);
2925   } while (true);
2926 }
2927 
2928 void PSParallelCompact::fill_blocks(size_t region_idx)
2929 {
2930   // Fill in the block table elements for the specified region.  Each block
2931   // table element holds the number of live words in the region that are to the
2932   // left of the first object that starts in the block.  Thus only blocks in
2933   // which an object starts need to be filled.
2934   //
2935   // The algorithm scans the section of the bitmap that corresponds to the
2936   // region, keeping a running total of the live words.  When an object start is
2937   // found, if it's the first to start in the block that contains it, the
2938   // current total is written to the block table element.
2939   const size_t Log2BlockSize = ParallelCompactData::Log2BlockSize;
2940   const size_t Log2RegionSize = ParallelCompactData::Log2RegionSize;
2941   const size_t RegionSize = ParallelCompactData::RegionSize;
2942 
2943   ParallelCompactData& sd = summary_data();
2944   const size_t partial_obj_size = sd.region(region_idx)->partial_obj_size();
2945   if (partial_obj_size >= RegionSize) {
2946     return; // No objects start in this region.
2947   }
2948 
2949   // Ensure the first loop iteration decides that the block has changed.
2950   size_t cur_block = sd.block_count();
2951 
2952   const ParMarkBitMap* const bitmap = mark_bitmap();
2953 
2954   const size_t Log2BitsPerBlock = Log2BlockSize - LogMinObjAlignment;
2955   assert((size_t)1 << Log2BitsPerBlock ==
2956          bitmap->words_to_bits(ParallelCompactData::BlockSize), "sanity");
2957 
2958   size_t beg_bit = bitmap->words_to_bits(region_idx << Log2RegionSize);
2959   const size_t range_end = beg_bit + bitmap->words_to_bits(RegionSize);
2960   size_t live_bits = bitmap->words_to_bits(partial_obj_size);
2961   beg_bit = bitmap->find_obj_beg(beg_bit + live_bits, range_end);
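       // Each time an object start crosses into a new block, record the live
       // words seen so far as that block's offset.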
2962   while (beg_bit < range_end) {
2963     const size_t new_block = beg_bit >> Log2BitsPerBlock;
2964     if (new_block != cur_block) {
2965       cur_block = new_block;
2966       sd.block(cur_block)->set_offset(bitmap->bits_to_words(live_bits));
2967     }
2968 
2969     const size_t end_bit = bitmap->find_obj_end(beg_bit, range_end);
2970     if (end_bit < range_end - 1) {
2971       live_bits += end_bit - beg_bit + 1;
2972       beg_bit = bitmap->find_obj_beg(end_bit + 1, range_end);
2973     } else {
2974       return;
2975     }
2976   }
2977 }
2978 
2979 void
2980 PSParallelCompact::move_and_update(ParCompactionManager* cm, SpaceId space_id) {
2981   const MutableSpace* sp = space(space_id);
2982   if (sp->is_empty()) {
2983     return;
2984   }
2985 
2986   ParallelCompactData& sd = PSParallelCompact::summary_data();
2987   ParMarkBitMap* const bitmap = mark_bitmap();
2988   HeapWord* const dp_addr = dense_prefix(space_id);
2989   HeapWord* beg_addr = sp->bottom();
2990   HeapWord* end_addr = sp->top();
2991 
2992   assert(beg_addr <= dp_addr && dp_addr <= end_addr, "bad dense prefix");
2993 
2994   const size_t beg_region = sd.addr_to_region_idx(beg_addr);
2995   const size_t dp_region = sd.addr_to_region_idx(dp_addr);
2996   if (beg_region < dp_region) {
2997     update_and_deadwood_in_dense_prefix(cm, space_id, beg_region, dp_region);
2998   }
2999 
3000   // The destination of the first live object that starts in the region is one
3001   // past the end of the partial object entering the region (if any).
3002   HeapWord* const dest_addr = sd.partial_obj_end(dp_region);
3003   HeapWord* const new_top = _space_info[space_id].new_top();
3004   assert(new_top >= dest_addr, "bad new_top value");
3005   const size_t words = pointer_delta(new_top, dest_addr);
3006 
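       // Move each live object beyond the dense prefix to its new location and
       // update its interior oops.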
3007   if (words > 0) {
3008     ObjectStartArray* start_array = _space_info[space_id].start_array();
3009     MoveAndUpdateClosure closure(bitmap, cm, start_array, dest_addr, words);
3010 
3011     ParMarkBitMap::IterationStatus status;
3012     status = bitmap->iterate(&closure, dest_addr, end_addr);
3013     assert(status == ParMarkBitMap::full, "iteration not complete");
3014     assert(bitmap->find_obj_beg(closure.source(), end_addr) == end_addr,
3015            "live objects skipped because closure is full");
3016   }
3017 }
3018 
3019 jlong PSParallelCompact::millis_since_last_gc() {
3020   // We need a monotonically non-decreasing time in ms but
3021   // os::javaTimeMillis() does not guarantee monotonicity.
3022   jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
3023   jlong ret_val = now - _time_of_last_gc;
3024   // XXX See note in genCollectedHeap::millis_since_last_gc().
3025   if (ret_val < 0) {
3026     NOT_PRODUCT(warning("time warp: " JLONG_FORMAT, ret_val);)
3027     return 0;
3028   }
3029   return ret_val;
3030 }
3031 
3032 void PSParallelCompact::reset_millis_since_last_gc() {
3033   // We need a monotonically non-decreasing time in ms but
3034   // os::javaTimeMillis() does not guarantee monotonicity.
3035   _time_of_last_gc = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
3036 }
3037 
3038 ParMarkBitMap::IterationStatus MoveAndUpdateClosure::copy_until_full()
3039 {
3040   if (source() != destination()) {
3041     DEBUG_ONLY(PSParallelCompact::check_new_location(source(), destination());)
3042     Copy::aligned_conjoint_words(source(), destination(), words_remaining());
3043   }
3044   update_state(words_remaining());
3045   assert(is_full(), "sanity");
3046   return ParMarkBitMap::full;
3047 }
3048 
3049 void MoveAndUpdateClosure::copy_partial_obj()
3050 {
3051   size_t words = words_remaining();
3052 
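       // Copy either the remainder of the partial object or as much of it as
       // fits in the remaining destination space, whichever is smaller.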
3053   HeapWord* const range_end = MIN2(source() + words, bitmap()->region_end());
3054   HeapWord* const end_addr = bitmap()->find_obj_end(source(), range_end);
3055   if (end_addr < range_end) {
3056     words = bitmap()->obj_size(source(), end_addr);
3057   }
3058 
3059   // This test is necessary; if omitted, the pointer updates to a partial object
3060   // that crosses the dense prefix boundary could be overwritten.
3061   if (source() != destination()) {
3062     DEBUG_ONLY(PSParallelCompact::check_new_location(source(), destination());)
3063     Copy::aligned_conjoint_words(source(), destination(), words);
3064   }
3065   update_state(words);
3066 }
3067 
3068 void InstanceKlass::oop_pc_update_pointers(oop obj, ParCompactionManager* cm) {
3069   PSParallelCompact::AdjustPointerClosure closure(cm);
3070   oop_oop_iterate_oop_maps<true>(obj, &closure);
3071 }
3072 
3073 void InstanceMirrorKlass::oop_pc_update_pointers(oop obj, ParCompactionManager* cm) {
3074   InstanceKlass::oop_pc_update_pointers(obj, cm);
3075 
3076   PSParallelCompact::AdjustPointerClosure closure(cm);
3077   oop_oop_iterate_statics<true>(obj, &closure);
3078 }
3079 
3080 void InstanceClassLoaderKlass::oop_pc_update_pointers(oop obj, ParCompactionManager* cm) {
3081   InstanceKlass::oop_pc_update_pointers(obj, cm);
3082 }
3083 
3084 #ifdef ASSERT
3085 template <class T> static void trace_reference_gc(const char *s, oop obj,
3086                                                   T* referent_addr,
3087                                                   T* next_addr,
3088                                                   T* discovered_addr) {
3089   log_develop_trace(gc, ref)("%s obj " PTR_FORMAT, s, p2i(obj));
3090   log_develop_trace(gc, ref)("     referent_addr/* " PTR_FORMAT " / " PTR_FORMAT,
3091                              p2i(referent_addr), referent_addr ? p2i(oopDesc::load_decode_heap_oop(referent_addr)) : NULL);
3092   log_develop_trace(gc, ref)("     next_addr/* " PTR_FORMAT " / " PTR_FORMAT,
3093                              p2i(next_addr), next_addr ? p2i(oopDesc::load_decode_heap_oop(next_addr)) : NULL);
3094   log_develop_trace(gc, ref)("     discovered_addr/* " PTR_FORMAT " / " PTR_FORMAT,
3095                              p2i(discovered_addr), discovered_addr ? p2i(oopDesc::load_decode_heap_oop(discovered_addr)) : NULL);
3096 }
3097 #endif
3098 
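     // Adjust the referent, next and discovered fields of a java.lang.ref.Reference,
     // specialized on whether oops are compressed.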
3099 template <class T>
3100 static void oop_pc_update_pointers_specialized(oop obj, ParCompactionManager* cm) {
3101   T* referent_addr = (T*)java_lang_ref_Reference::referent_addr(obj);
3102   PSParallelCompact::adjust_pointer(referent_addr, cm);
3103   T* next_addr = (T*)java_lang_ref_Reference::next_addr(obj);
3104   PSParallelCompact::adjust_pointer(next_addr, cm);
3105   T* discovered_addr = (T*)java_lang_ref_Reference::discovered_addr(obj);
3106   PSParallelCompact::adjust_pointer(discovered_addr, cm);
3107   debug_only(trace_reference_gc("InstanceRefKlass::oop_update_ptrs", obj,
3108                                 referent_addr, next_addr, discovered_addr);)
3109 }
3110 
3111 void InstanceRefKlass::oop_pc_update_pointers(oop obj, ParCompactionManager* cm) {
3112   InstanceKlass::oop_pc_update_pointers(obj, cm);
3113 
3114   if (UseCompressedOops) {
3115     oop_pc_update_pointers_specialized<narrowOop>(obj, cm);
3116   } else {
3117     oop_pc_update_pointers_specialized<oop>(obj, cm);
3118   }
3119 }
3120 
3121 void ObjArrayKlass::oop_pc_update_pointers(oop obj, ParCompactionManager* cm) {
3122   assert(obj->is_objArray(), "obj must be obj array");
3123   PSParallelCompact::AdjustPointerClosure closure(cm);
3124   oop_oop_iterate_elements<true>(objArrayOop(obj), &closure);
3125 }
3126 
3127 void TypeArrayKlass::oop_pc_update_pointers(oop obj, ParCompactionManager* cm) {
3128   assert(obj->is_typeArray(),"must be a type array");
3129 }
3130 
3131 ParMarkBitMapClosure::IterationStatus
3132 MoveAndUpdateClosure::do_addr(HeapWord* addr, size_t words) {
3133   assert(destination() != NULL, "sanity");
3134   assert(bitmap()->obj_size(addr) == words, "bad size");
3135 
3136   _source = addr;
3137   assert(PSParallelCompact::summary_data().calc_new_pointer(source(), compaction_manager()) ==
3138          destination(), "wrong destination");
3139 
3140   if (words > words_remaining()) {
3141     return ParMarkBitMap::would_overflow;
3142   }
3143 
3144   // The start_array must be updated even if the object is not moving.
3145   if (_start_array != NULL) {
3146     _start_array->allocate_block(destination());
3147   }
3148 
3149   if (destination() != source()) {
3150     DEBUG_ONLY(PSParallelCompact::check_new_location(source(), destination());)
3151     Copy::aligned_conjoint_words(source(), destination(), words);
3152   }
3153 
3154   oop moved_oop = (oop) destination();
3155   compaction_manager()->update_contents(moved_oop);
3156   assert(moved_oop->is_oop_or_null(), "Expected an oop or NULL at " PTR_FORMAT, p2i(moved_oop));
3157 
3158   update_state(words);
3159   assert(destination() == (HeapWord*)moved_oop + moved_oop->size(), "sanity");
3160   return is_full() ? ParMarkBitMap::full : ParMarkBitMap::incomplete;
3161 }
3162 
3163 UpdateOnlyClosure::UpdateOnlyClosure(ParMarkBitMap* mbm,
3164                                      ParCompactionManager* cm,
3165                                      PSParallelCompact::SpaceId space_id) :
3166   ParMarkBitMapClosure(mbm, cm),
3167   _space_id(space_id),
3168   _start_array(PSParallelCompact::start_array(space_id))
3169 {
3170 }
3171 
3172 // Updates the references in the object to their new values.
3173 ParMarkBitMapClosure::IterationStatus
3174 UpdateOnlyClosure::do_addr(HeapWord* addr, size_t words) {
3175   do_addr(addr);
3176   return ParMarkBitMap::incomplete;
3177 }
--- EOF ---