rev 9846 : [mq]: par-scav-patch
rev 9847 : 8146987: Improve Parallel GC Full GC by caching results of live_words_in_range()
Summary: A large part of the time in the parallel scavenge collector is spent computing the number of live words within memory ranges in order to determine where to move an object. Try to incrementally calculate this value.
Reviewed-by: tschatzl, mgerdin
Contributed-by: ray alex <sky1young@gmail.com>
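
calc_new_pointer() below queries the mark bitmap for the number of live words
between a block-aligned start address and an object's address; for objects in
the same block the start of the range stays the same while the end moves to the
right, so each result can be derived from the previous one. A minimal sketch of
such a cache, with hypothetical names and placement (not necessarily how the
actual patch does it):

  // Hypothetical per-worker cache; both ends of the cached range start live
  // objects, so a query that extends the previous range to the right only
  // needs to count the newly added tail.
  struct LiveWordsCache {            // hypothetical helper, one per GC worker
    HeapWord* last_beg;
    HeapWord* last_end;
    size_t    last_ret;
  };

  static size_t cached_live_words_in_range(const ParMarkBitMap* bitmap,
                                           LiveWordsCache* cache,
                                           HeapWord* beg, oop end_obj) {
    HeapWord* const end = (HeapWord*)end_obj;
    if (beg == cache->last_beg && cache->last_end != NULL &&
        cache->last_end <= end) {
      // Add only the live words between the previous end object and this one.
      cache->last_ret += bitmap->live_words_in_range(cache->last_end, end_obj);
    } else {
      cache->last_ret = bitmap->live_words_in_range(beg, end_obj);
    }
    cache->last_beg = beg;
    cache->last_end = end;
    return cache->last_ret;
  }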

   1 /*
   2  * Copyright (c) 2005, 2015, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "classfile/stringTable.hpp"
  27 #include "classfile/symbolTable.hpp"
  28 #include "classfile/systemDictionary.hpp"
  29 #include "code/codeCache.hpp"
  30 #include "gc/parallel/gcTaskManager.hpp"
  31 #include "gc/parallel/parallelScavengeHeap.inline.hpp"
  32 #include "gc/parallel/pcTasks.hpp"
  33 #include "gc/parallel/psAdaptiveSizePolicy.hpp"
  34 #include "gc/parallel/psCompactionManager.inline.hpp"
  35 #include "gc/parallel/psMarkSweep.hpp"
  36 #include "gc/parallel/psMarkSweepDecorator.hpp"
  37 #include "gc/parallel/psOldGen.hpp"
  38 #include "gc/parallel/psParallelCompact.inline.hpp"
  39 #include "gc/parallel/psPromotionManager.inline.hpp"
  40 #include "gc/parallel/psScavenge.hpp"
  41 #include "gc/parallel/psYoungGen.hpp"
  42 #include "gc/shared/gcCause.hpp"
  43 #include "gc/shared/gcHeapSummary.hpp"
  44 #include "gc/shared/gcId.hpp"
  45 #include "gc/shared/gcLocker.inline.hpp"
  46 #include "gc/shared/gcTimer.hpp"
  47 #include "gc/shared/gcTrace.hpp"
  48 #include "gc/shared/gcTraceTime.inline.hpp"
  49 #include "gc/shared/isGCActiveMark.hpp"
  50 #include "gc/shared/referencePolicy.hpp"
  51 #include "gc/shared/referenceProcessor.hpp"
  52 #include "gc/shared/spaceDecorator.hpp"
  53 #include "logging/log.hpp"
  54 #include "oops/instanceKlass.inline.hpp"
  55 #include "oops/instanceMirrorKlass.inline.hpp"
  56 #include "oops/methodData.hpp"
  57 #include "oops/objArrayKlass.inline.hpp"
  58 #include "oops/oop.inline.hpp"
  59 #include "runtime/atomic.inline.hpp"
  60 #include "runtime/fprofiler.hpp"
  61 #include "runtime/safepoint.hpp"
  62 #include "runtime/vmThread.hpp"
  63 #include "services/management.hpp"
  64 #include "services/memTracker.hpp"
  65 #include "services/memoryService.hpp"
  66 #include "utilities/events.hpp"
  67 #include "utilities/stack.inline.hpp"
  68 
  69 #include <math.h>
  70 
  71 // All sizes are in HeapWords.
  72 const size_t ParallelCompactData::Log2RegionSize  = 16; // 64K words
  73 const size_t ParallelCompactData::RegionSize      = (size_t)1 << Log2RegionSize;
  74 const size_t ParallelCompactData::RegionSizeBytes =
  75   RegionSize << LogHeapWordSize;
  76 const size_t ParallelCompactData::RegionSizeOffsetMask = RegionSize - 1;
  77 const size_t ParallelCompactData::RegionAddrOffsetMask = RegionSizeBytes - 1;
  78 const size_t ParallelCompactData::RegionAddrMask       = ~RegionAddrOffsetMask;
  79 
  80 const size_t ParallelCompactData::Log2BlockSize   = 7; // 128 words
  81 const size_t ParallelCompactData::BlockSize       = (size_t)1 << Log2BlockSize;
  82 const size_t ParallelCompactData::BlockSizeBytes  =
  83   BlockSize << LogHeapWordSize;
  84 const size_t ParallelCompactData::BlockSizeOffsetMask = BlockSize - 1;
  85 const size_t ParallelCompactData::BlockAddrOffsetMask = BlockSizeBytes - 1;
  86 const size_t ParallelCompactData::BlockAddrMask       = ~BlockAddrOffsetMask;
  87 
  88 const size_t ParallelCompactData::BlocksPerRegion = RegionSize / BlockSize;
  89 const size_t ParallelCompactData::Log2BlocksPerRegion =
  90   Log2RegionSize - Log2BlockSize;
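// Worked example (assuming 64-bit HeapWords, LogHeapWordSize == 3): RegionSize
// is 2^16 = 64K words = 512 KiB and BlockSize is 2^7 = 128 words = 1 KiB, so
// BlocksPerRegion = 2^(16 - 7) = 512.  An address maps to its region by
// shifting its word offset from _region_start right by Log2RegionSize; the
// bits covered by RegionSizeOffsetMask give the offset within that region.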
  91 
  92 const ParallelCompactData::RegionData::region_sz_t
  93 ParallelCompactData::RegionData::dc_shift = 27;
  94 
  95 const ParallelCompactData::RegionData::region_sz_t
  96 ParallelCompactData::RegionData::dc_mask = ~0U << dc_shift;
  97 
  98 const ParallelCompactData::RegionData::region_sz_t
  99 ParallelCompactData::RegionData::dc_one = 0x1U << dc_shift;
 100 
 101 const ParallelCompactData::RegionData::region_sz_t
 102 ParallelCompactData::RegionData::los_mask = ~dc_mask;
 103 
 104 const ParallelCompactData::RegionData::region_sz_t
 105 ParallelCompactData::RegionData::dc_claimed = 0x8U << dc_shift;
 106 
 107 const ParallelCompactData::RegionData::region_sz_t
 108 ParallelCompactData::RegionData::dc_completed = 0xcU << dc_shift;
 109 
 110 SpaceInfo PSParallelCompact::_space_info[PSParallelCompact::last_space_id];
 111 
 112 ReferenceProcessor* PSParallelCompact::_ref_processor = NULL;
 113 
 114 double PSParallelCompact::_dwl_mean;
 115 double PSParallelCompact::_dwl_std_dev;
 116 double PSParallelCompact::_dwl_first_term;
 117 double PSParallelCompact::_dwl_adjustment;
 118 #ifdef  ASSERT
 119 bool   PSParallelCompact::_dwl_initialized = false;
 120 #endif  // #ifdef ASSERT
 121 
 122 void SplitInfo::record(size_t src_region_idx, size_t partial_obj_size,
 123                        HeapWord* destination)
 124 {
 125   assert(src_region_idx != 0, "invalid src_region_idx");
 126   assert(partial_obj_size != 0, "invalid partial_obj_size argument");
 127   assert(destination != NULL, "invalid destination argument");
 128 
 129   _src_region_idx = src_region_idx;
 130   _partial_obj_size = partial_obj_size;
 131   _destination = destination;
 132 
 133   // These fields may not be updated below, so make sure they're clear.
 134   assert(_dest_region_addr == NULL, "should have been cleared");
 135   assert(_first_src_addr == NULL, "should have been cleared");
 136 
 137   // Determine the number of destination regions for the partial object.
 138   HeapWord* const last_word = destination + partial_obj_size - 1;
 139   const ParallelCompactData& sd = PSParallelCompact::summary_data();
 140   HeapWord* const beg_region_addr = sd.region_align_down(destination);
 141   HeapWord* const end_region_addr = sd.region_align_down(last_word);
 142 
 143   if (beg_region_addr == end_region_addr) {
 144     // One destination region.
 145     _destination_count = 1;
 146     if (end_region_addr == destination) {
 147       // The destination falls on a region boundary, thus the first word of the
 148       // partial object will be the first word copied to the destination region.
 149       _dest_region_addr = end_region_addr;
 150       _first_src_addr = sd.region_to_addr(src_region_idx);
 151     }
 152   } else {
 153     // Two destination regions.  When copied, the partial object will cross a
 154     // destination region boundary, so a word somewhere within the partial
 155     // object will be the first word copied to the second destination region.
 156     _destination_count = 2;
 157     _dest_region_addr = end_region_addr;
 158     const size_t ofs = pointer_delta(end_region_addr, destination);
 159     assert(ofs < _partial_obj_size, "sanity");
 160     _first_src_addr = sd.region_to_addr(src_region_idx) + ofs;
 161   }
 162 }
 163 
 164 void SplitInfo::clear()
 165 {
 166   _src_region_idx = 0;
 167   _partial_obj_size = 0;
 168   _destination = NULL;
 169   _destination_count = 0;
 170   _dest_region_addr = NULL;
 171   _first_src_addr = NULL;
 172   assert(!is_valid(), "sanity");
 173 }
 174 
 175 #ifdef  ASSERT
 176 void SplitInfo::verify_clear()
 177 {
 178   assert(_src_region_idx == 0, "not clear");
 179   assert(_partial_obj_size == 0, "not clear");
 180   assert(_destination == NULL, "not clear");
 181   assert(_destination_count == 0, "not clear");
 182   assert(_dest_region_addr == NULL, "not clear");
 183   assert(_first_src_addr == NULL, "not clear");
 184 }
 185 #endif  // #ifdef ASSERT
 186 
 187 
 188 void PSParallelCompact::print_on_error(outputStream* st) {
 189   _mark_bitmap.print_on_error(st);
 190 }
 191 
 192 #ifndef PRODUCT
 193 const char* PSParallelCompact::space_names[] = {
 194   "old ", "eden", "from", "to  "
 195 };
 196 
 197 void PSParallelCompact::print_region_ranges() {
 198   if (!develop_log_is_enabled(Trace, gc, compaction, phases)) {
 199     return;
 200   }
 201   LogHandle(gc, compaction, phases) log;
 202   ResourceMark rm;
 203   Universe::print_on(log.trace_stream());
 204   log.trace("space  bottom     top        end        new_top");
 205   log.trace("------ ---------- ---------- ---------- ----------");
 206 
 207   for (unsigned int id = 0; id < last_space_id; ++id) {
 208     const MutableSpace* space = _space_info[id].space();
 209     log.trace("%u %s "
 210               SIZE_FORMAT_W(10) " " SIZE_FORMAT_W(10) " "
 211               SIZE_FORMAT_W(10) " " SIZE_FORMAT_W(10) " ",
 212               id, space_names[id],
 213               summary_data().addr_to_region_idx(space->bottom()),
 214               summary_data().addr_to_region_idx(space->top()),
 215               summary_data().addr_to_region_idx(space->end()),
 216               summary_data().addr_to_region_idx(_space_info[id].new_top()));
 217   }
 218 }
 219 
 220 void
 221 print_generic_summary_region(size_t i, const ParallelCompactData::RegionData* c)
 222 {
 223 #define REGION_IDX_FORMAT        SIZE_FORMAT_W(7)
 224 #define REGION_DATA_FORMAT       SIZE_FORMAT_W(5)
 225 
 226   ParallelCompactData& sd = PSParallelCompact::summary_data();
 227   size_t dci = c->destination() ? sd.addr_to_region_idx(c->destination()) : 0;
 228   log_develop_trace(gc, compaction, phases)(
 229       REGION_IDX_FORMAT " " PTR_FORMAT " "
 230       REGION_IDX_FORMAT " " PTR_FORMAT " "
 231       REGION_DATA_FORMAT " " REGION_DATA_FORMAT " "
 232       REGION_DATA_FORMAT " " REGION_IDX_FORMAT " %d",
 233       i, p2i(c->data_location()), dci, p2i(c->destination()),
 234       c->partial_obj_size(), c->live_obj_size(),
 235       c->data_size(), c->source_region(), c->destination_count());
 236 
 237 #undef  REGION_IDX_FORMAT
 238 #undef  REGION_DATA_FORMAT
 239 }
 240 
 241 void
 242 print_generic_summary_data(ParallelCompactData& summary_data,
 243                            HeapWord* const beg_addr,
 244                            HeapWord* const end_addr)
 245 {
 246   size_t total_words = 0;
 247   size_t i = summary_data.addr_to_region_idx(beg_addr);
 248   const size_t last = summary_data.addr_to_region_idx(end_addr);
 249   HeapWord* pdest = 0;
 250 
 251   while (i <= last) {
 252     ParallelCompactData::RegionData* c = summary_data.region(i);
 253     if (c->data_size() != 0 || c->destination() != pdest) {
 254       print_generic_summary_region(i, c);
 255       total_words += c->data_size();
 256       pdest = c->destination();
 257     }
 258     ++i;
 259   }
 260 
 261   log_develop_trace(gc, compaction, phases)("summary_data_bytes=" SIZE_FORMAT, total_words * HeapWordSize);
 262 }
 263 
 264 void
 265 print_generic_summary_data(ParallelCompactData& summary_data,
 266                            SpaceInfo* space_info)
 267 {
 268   if (!develop_log_is_enabled(Trace, gc, compaction, phases)) {
 269     return;
 270   }
 271 
 272   for (unsigned int id = 0; id < PSParallelCompact::last_space_id; ++id) {
 273     const MutableSpace* space = space_info[id].space();
 274     print_generic_summary_data(summary_data, space->bottom(),
 275                                MAX2(space->top(), space_info[id].new_top()));
 276   }
 277 }
 278 
 279 void
 280 print_initial_summary_data(ParallelCompactData& summary_data,
 281                            const MutableSpace* space) {
 282   if (space->top() == space->bottom()) {
 283     return;
 284   }
 285 
 286   const size_t region_size = ParallelCompactData::RegionSize;
 287   typedef ParallelCompactData::RegionData RegionData;
 288   HeapWord* const top_aligned_up = summary_data.region_align_up(space->top());
 289   const size_t end_region = summary_data.addr_to_region_idx(top_aligned_up);
 290   const RegionData* c = summary_data.region(end_region - 1);
 291   HeapWord* end_addr = c->destination() + c->data_size();
 292   const size_t live_in_space = pointer_delta(end_addr, space->bottom());
 293 
 294   // Print (and count) the full regions at the beginning of the space.
 295   size_t full_region_count = 0;
 296   size_t i = summary_data.addr_to_region_idx(space->bottom());
 297   while (i < end_region && summary_data.region(i)->data_size() == region_size) {
 298     ParallelCompactData::RegionData* c = summary_data.region(i);
 299     log_develop_trace(gc, compaction, phases)(
 300         SIZE_FORMAT_W(5) " " PTR_FORMAT " " SIZE_FORMAT_W(5) " " SIZE_FORMAT_W(5) " " SIZE_FORMAT_W(5) " " SIZE_FORMAT_W(5) " %d",
 301         i, p2i(c->destination()),
 302         c->partial_obj_size(), c->live_obj_size(),
 303         c->data_size(), c->source_region(), c->destination_count());
 304     ++full_region_count;
 305     ++i;
 306   }
 307 
 308   size_t live_to_right = live_in_space - full_region_count * region_size;
 309 
 310   double max_reclaimed_ratio = 0.0;
 311   size_t max_reclaimed_ratio_region = 0;
 312   size_t max_dead_to_right = 0;
 313   size_t max_live_to_right = 0;
 314 
 315   // Print the 'reclaimed ratio' for regions while there is something live in
 316   // the region or to the right of it.  The remaining regions are empty (and
 317   // uninteresting), and computing the ratio will result in division by 0.
 318   while (i < end_region && live_to_right > 0) {
 319     c = summary_data.region(i);
 320     HeapWord* const region_addr = summary_data.region_to_addr(i);
 321     const size_t used_to_right = pointer_delta(space->top(), region_addr);
 322     const size_t dead_to_right = used_to_right - live_to_right;
 323     const double reclaimed_ratio = double(dead_to_right) / live_to_right;
 324 
  325     if (reclaimed_ratio > max_reclaimed_ratio) {
  326       max_reclaimed_ratio = reclaimed_ratio;
  327       max_reclaimed_ratio_region = i;
  328       max_dead_to_right = dead_to_right;
  329       max_live_to_right = live_to_right;
  330     }
 331 
 332     ParallelCompactData::RegionData* c = summary_data.region(i);
 333     log_develop_trace(gc, compaction, phases)(
 334         SIZE_FORMAT_W(5) " " PTR_FORMAT " " SIZE_FORMAT_W(5) " " SIZE_FORMAT_W(5) " " SIZE_FORMAT_W(5) " " SIZE_FORMAT_W(5) " %d"
 335         "%12.10f " SIZE_FORMAT_W(10) " " SIZE_FORMAT_W(10),
 336         i, p2i(c->destination()),
 337         c->partial_obj_size(), c->live_obj_size(),
 338         c->data_size(), c->source_region(), c->destination_count(),
 339         reclaimed_ratio, dead_to_right, live_to_right);
 340 
 341 
 342     live_to_right -= c->data_size();
 343     ++i;
 344   }
 345 
 346   // Any remaining regions are empty.  Print one more if there is one.
 347   if (i < end_region) {
 348     ParallelCompactData::RegionData* c = summary_data.region(i);
 349     log_develop_trace(gc, compaction, phases)(
 350         SIZE_FORMAT_W(5) " " PTR_FORMAT " " SIZE_FORMAT_W(5) " " SIZE_FORMAT_W(5) " " SIZE_FORMAT_W(5) " " SIZE_FORMAT_W(5) " %d",
 351          i, p2i(c->destination()),
 352          c->partial_obj_size(), c->live_obj_size(),
 353          c->data_size(), c->source_region(), c->destination_count());
 354   }
 355 
 356   log_develop_trace(gc, compaction, phases)("max:  " SIZE_FORMAT_W(4) " d2r=" SIZE_FORMAT_W(10) " l2r=" SIZE_FORMAT_W(10) " max_ratio=%14.12f",
 357                                             max_reclaimed_ratio_region, max_dead_to_right, max_live_to_right, max_reclaimed_ratio);
 358 }
 359 
 360 void
 361 print_initial_summary_data(ParallelCompactData& summary_data,
 362                            SpaceInfo* space_info) {
 363   if (!develop_log_is_enabled(Trace, gc, compaction, phases)) {
 364     return;
 365   }
 366 
 367   unsigned int id = PSParallelCompact::old_space_id;
 368   const MutableSpace* space;
 369   do {
 370     space = space_info[id].space();
 371     print_initial_summary_data(summary_data, space);
 372   } while (++id < PSParallelCompact::eden_space_id);
 373 
 374   do {
 375     space = space_info[id].space();
 376     print_generic_summary_data(summary_data, space->bottom(), space->top());
 377   } while (++id < PSParallelCompact::last_space_id);
 378 }
 379 #endif  // #ifndef PRODUCT
 380 
 381 #ifdef  ASSERT
 382 size_t add_obj_count;
 383 size_t add_obj_size;
 384 size_t mark_bitmap_count;
 385 size_t mark_bitmap_size;
 386 #endif  // #ifdef ASSERT
 387 
 388 ParallelCompactData::ParallelCompactData()
 389 {
 390   _region_start = 0;
 391 
 392   _region_vspace = 0;
 393   _reserved_byte_size = 0;
 394   _region_data = 0;
 395   _region_count = 0;
 396 
 397   _block_vspace = 0;
 398   _block_data = 0;
 399   _block_count = 0;
 400 }
 401 
 402 bool ParallelCompactData::initialize(MemRegion covered_region)
 403 {
 404   _region_start = covered_region.start();
 405   const size_t region_size = covered_region.word_size();
 406   DEBUG_ONLY(_region_end = _region_start + region_size;)
 407 
 408   assert(region_align_down(_region_start) == _region_start,
 409          "region start not aligned");
 410   assert((region_size & RegionSizeOffsetMask) == 0,
 411          "region size not a multiple of RegionSize");
 412 
 413   bool result = initialize_region_data(region_size) && initialize_block_data();
 414   return result;
 415 }
 416 
 417 PSVirtualSpace*
 418 ParallelCompactData::create_vspace(size_t count, size_t element_size)
 419 {
 420   const size_t raw_bytes = count * element_size;
 421   const size_t page_sz = os::page_size_for_region_aligned(raw_bytes, 10);
 422   const size_t granularity = os::vm_allocation_granularity();
 423   _reserved_byte_size = align_size_up(raw_bytes, MAX2(page_sz, granularity));
 424 
 425   const size_t rs_align = page_sz == (size_t) os::vm_page_size() ? 0 :
 426     MAX2(page_sz, granularity);
 427   ReservedSpace rs(_reserved_byte_size, rs_align, rs_align > 0);
 428   os::trace_page_sizes("par compact", raw_bytes, raw_bytes, page_sz, rs.base(),
 429                        rs.size());
 430 
 431   MemTracker::record_virtual_memory_type((address)rs.base(), mtGC);
 432 
 433   PSVirtualSpace* vspace = new PSVirtualSpace(rs, page_sz);
 434   if (vspace != 0) {
 435     if (vspace->expand_by(_reserved_byte_size)) {
 436       return vspace;
 437     }
 438     delete vspace;
 439     // Release memory reserved in the space.
 440     rs.release();
 441   }
 442 
 443   return 0;
 444 }
 445 
 446 bool ParallelCompactData::initialize_region_data(size_t region_size)
 447 {
 448   const size_t count = (region_size + RegionSizeOffsetMask) >> Log2RegionSize;
 449   _region_vspace = create_vspace(count, sizeof(RegionData));
 450   if (_region_vspace != 0) {
 451     _region_data = (RegionData*)_region_vspace->reserved_low_addr();
 452     _region_count = count;
 453     return true;
 454   }
 455   return false;
 456 }
 457 
 458 bool ParallelCompactData::initialize_block_data()
 459 {
 460   assert(_region_count != 0, "region data must be initialized first");
 461   const size_t count = _region_count << Log2BlocksPerRegion;
 462   _block_vspace = create_vspace(count, sizeof(BlockData));
 463   if (_block_vspace != 0) {
 464     _block_data = (BlockData*)_block_vspace->reserved_low_addr();
 465     _block_count = count;
 466     return true;
 467   }
 468   return false;
 469 }
 470 
 471 void ParallelCompactData::clear()
 472 {
 473   memset(_region_data, 0, _region_vspace->committed_size());
 474   memset(_block_data, 0, _block_vspace->committed_size());
 475 }
 476 
 477 void ParallelCompactData::clear_range(size_t beg_region, size_t end_region) {
 478   assert(beg_region <= _region_count, "beg_region out of range");
 479   assert(end_region <= _region_count, "end_region out of range");
 480   assert(RegionSize % BlockSize == 0, "RegionSize not a multiple of BlockSize");
 481 
 482   const size_t region_cnt = end_region - beg_region;
 483   memset(_region_data + beg_region, 0, region_cnt * sizeof(RegionData));
 484 
 485   const size_t beg_block = beg_region * BlocksPerRegion;
 486   const size_t block_cnt = region_cnt * BlocksPerRegion;
 487   memset(_block_data + beg_block, 0, block_cnt * sizeof(BlockData));
 488 }
 489 
 490 HeapWord* ParallelCompactData::partial_obj_end(size_t region_idx) const
 491 {
 492   const RegionData* cur_cp = region(region_idx);
 493   const RegionData* const end_cp = region(region_count() - 1);
 494 
 495   HeapWord* result = region_to_addr(region_idx);
 496   if (cur_cp < end_cp) {
 497     do {
 498       result += cur_cp->partial_obj_size();
 499     } while (cur_cp->partial_obj_size() == RegionSize && ++cur_cp < end_cp);
 500   }
 501   return result;
 502 }
 503 
 504 void ParallelCompactData::add_obj(HeapWord* addr, size_t len)
 505 {
 506   const size_t obj_ofs = pointer_delta(addr, _region_start);
 507   const size_t beg_region = obj_ofs >> Log2RegionSize;
 508   const size_t end_region = (obj_ofs + len - 1) >> Log2RegionSize;
 509 
 510   DEBUG_ONLY(Atomic::inc_ptr(&add_obj_count);)
 511   DEBUG_ONLY(Atomic::add_ptr(len, &add_obj_size);)
 512 
 513   if (beg_region == end_region) {
 514     // All in one region.
 515     _region_data[beg_region].add_live_obj(len);
 516     return;
 517   }
 518 
 519   // First region.
 520   const size_t beg_ofs = region_offset(addr);
 521   _region_data[beg_region].add_live_obj(RegionSize - beg_ofs);
 522 
 523   Klass* klass = ((oop)addr)->klass();
 524   // Middle regions--completely spanned by this object.
 525   for (size_t region = beg_region + 1; region < end_region; ++region) {
 526     _region_data[region].set_partial_obj_size(RegionSize);
 527     _region_data[region].set_partial_obj_addr(addr);
 528   }
 529 
 530   // Last region.
 531   const size_t end_ofs = region_offset(addr + len - 1);
 532   _region_data[end_region].set_partial_obj_size(end_ofs + 1);
 533   _region_data[end_region].set_partial_obj_addr(addr);
 534 }
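// Example: with RegionSize == 64K words, a 150000-word object that starts 1000
// words into region R contributes 64536 live words to R (RegionSize - beg_ofs),
// a partial_obj_size of RegionSize (65536) to region R + 1, and a
// partial_obj_size of 19928 (end_ofs + 1) to region R + 2; both partial
// regions record the object's start address in partial_obj_addr.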
 535 
 536 void
 537 ParallelCompactData::summarize_dense_prefix(HeapWord* beg, HeapWord* end)
 538 {
 539   assert(region_offset(beg) == 0, "not RegionSize aligned");
 540   assert(region_offset(end) == 0, "not RegionSize aligned");
 541 
 542   size_t cur_region = addr_to_region_idx(beg);
 543   const size_t end_region = addr_to_region_idx(end);
 544   HeapWord* addr = beg;
 545   while (cur_region < end_region) {
 546     _region_data[cur_region].set_destination(addr);
 547     _region_data[cur_region].set_destination_count(0);
 548     _region_data[cur_region].set_source_region(cur_region);
 549     _region_data[cur_region].set_data_location(addr);
 550 
 551     // Update live_obj_size so the region appears completely full.
 552     size_t live_size = RegionSize - _region_data[cur_region].partial_obj_size();
 553     _region_data[cur_region].set_live_obj_size(live_size);
 554 
 555     ++cur_region;
 556     addr += RegionSize;
 557   }
 558 }
 559 
 560 // Find the point at which a space can be split and, if necessary, record the
 561 // split point.
 562 //
 563 // If the current src region (which overflowed the destination space) doesn't
 564 // have a partial object, the split point is at the beginning of the current src
 565 // region (an "easy" split, no extra bookkeeping required).
 566 //
 567 // If the current src region has a partial object, the split point is in the
 568 // region where that partial object starts (call it the split_region).  If
 569 // split_region has a partial object, then the split point is just after that
 570 // partial object (a "hard" split where we have to record the split data and
 571 // zero the partial_obj_size field).  With a "hard" split, we know that the
 572 // partial_obj ends within split_region because the partial object that caused
 573 // the overflow starts in split_region.  If split_region doesn't have a partial
 574 // obj, then the split is at the beginning of split_region (another "easy"
 575 // split).
 576 HeapWord*
 577 ParallelCompactData::summarize_split_space(size_t src_region,
 578                                            SplitInfo& split_info,
 579                                            HeapWord* destination,
 580                                            HeapWord* target_end,
 581                                            HeapWord** target_next)
 582 {
 583   assert(destination <= target_end, "sanity");
 584   assert(destination + _region_data[src_region].data_size() > target_end,
 585     "region should not fit into target space");
 586   assert(is_region_aligned(target_end), "sanity");
 587 
 588   size_t split_region = src_region;
 589   HeapWord* split_destination = destination;
 590   size_t partial_obj_size = _region_data[src_region].partial_obj_size();
 591 
 592   if (destination + partial_obj_size > target_end) {
 593     // The split point is just after the partial object (if any) in the
 594     // src_region that contains the start of the object that overflowed the
 595     // destination space.
 596     //
 597     // Find the start of the "overflow" object and set split_region to the
 598     // region containing it.
 599     HeapWord* const overflow_obj = _region_data[src_region].partial_obj_addr();
 600     split_region = addr_to_region_idx(overflow_obj);
 601 
 602     // Clear the source_region field of all destination regions whose first word
 603     // came from data after the split point (a non-null source_region field
 604     // implies a region must be filled).
 605     //
 606     // An alternative to the simple loop below:  clear during post_compact(),
 607     // which uses memcpy instead of individual stores, and is easy to
 608     // parallelize.  (The downside is that it clears the entire RegionData
 609     // object as opposed to just one field.)
 610     //
 611     // post_compact() would have to clear the summary data up to the highest
 612     // address that was written during the summary phase, which would be
 613     //
 614     //         max(top, max(new_top, clear_top))
 615     //
 616     // where clear_top is a new field in SpaceInfo.  Would have to set clear_top
 617     // to target_end.
 618     const RegionData* const sr = region(split_region);
 619     const size_t beg_idx =
 620       addr_to_region_idx(region_align_up(sr->destination() +
 621                                          sr->partial_obj_size()));
 622     const size_t end_idx = addr_to_region_idx(target_end);
 623 
 624     log_develop_trace(gc, compaction, phases)("split:  clearing source_region field in [" SIZE_FORMAT ", " SIZE_FORMAT ")", beg_idx, end_idx);
 625     for (size_t idx = beg_idx; idx < end_idx; ++idx) {
 626       _region_data[idx].set_source_region(0);
 627     }
 628 
 629     // Set split_destination and partial_obj_size to reflect the split region.
 630     split_destination = sr->destination();
 631     partial_obj_size = sr->partial_obj_size();
 632   }
 633 
 634   // The split is recorded only if a partial object extends onto the region.
 635   if (partial_obj_size != 0) {
 636     _region_data[split_region].set_partial_obj_size(0);
 637     split_info.record(split_region, partial_obj_size, split_destination);
 638   }
 639 
 640   // Setup the continuation addresses.
 641   *target_next = split_destination + partial_obj_size;
 642   HeapWord* const source_next = region_to_addr(split_region) + partial_obj_size;
 643 
 644   if (develop_log_is_enabled(Trace, gc, compaction, phases)) {
 645     const char * split_type = partial_obj_size == 0 ? "easy" : "hard";
 646     log_develop_trace(gc, compaction, phases)("%s split:  src=" PTR_FORMAT " src_c=" SIZE_FORMAT " pos=" SIZE_FORMAT,
 647                                               split_type, p2i(source_next), split_region, partial_obj_size);
 648     log_develop_trace(gc, compaction, phases)("%s split:  dst=" PTR_FORMAT " dst_c=" SIZE_FORMAT " tn=" PTR_FORMAT,
 649                                               split_type, p2i(split_destination),
 650                                               addr_to_region_idx(split_destination),
 651                                               p2i(*target_next));
 652 
 653     if (partial_obj_size != 0) {
 654       HeapWord* const po_beg = split_info.destination();
 655       HeapWord* const po_end = po_beg + split_info.partial_obj_size();
 656       log_develop_trace(gc, compaction, phases)("%s split:  po_beg=" PTR_FORMAT " " SIZE_FORMAT " po_end=" PTR_FORMAT " " SIZE_FORMAT,
 657                                                 split_type,
 658                                                 p2i(po_beg), addr_to_region_idx(po_beg),
 659                                                 p2i(po_end), addr_to_region_idx(po_end));
 660     }
 661   }
 662 
 663   return source_next;
 664 }
 665 
 666 bool ParallelCompactData::summarize(SplitInfo& split_info,
 667                                     HeapWord* source_beg, HeapWord* source_end,
 668                                     HeapWord** source_next,
 669                                     HeapWord* target_beg, HeapWord* target_end,
 670                                     HeapWord** target_next)
 671 {
 672   HeapWord* const source_next_val = source_next == NULL ? NULL : *source_next;
 673   log_develop_trace(gc, compaction, phases)(
 674       "sb=" PTR_FORMAT " se=" PTR_FORMAT " sn=" PTR_FORMAT
 675       "tb=" PTR_FORMAT " te=" PTR_FORMAT " tn=" PTR_FORMAT,
 676       p2i(source_beg), p2i(source_end), p2i(source_next_val),
 677       p2i(target_beg), p2i(target_end), p2i(*target_next));
 678 
 679   size_t cur_region = addr_to_region_idx(source_beg);
 680   const size_t end_region = addr_to_region_idx(region_align_up(source_end));
 681 
 682   HeapWord *dest_addr = target_beg;
 683   while (cur_region < end_region) {
 684     // The destination must be set even if the region has no data.
 685     _region_data[cur_region].set_destination(dest_addr);
 686 
 687     size_t words = _region_data[cur_region].data_size();
 688     if (words > 0) {
 689       // If cur_region does not fit entirely into the target space, find a point
 690       // at which the source space can be 'split' so that part is copied to the
 691       // target space and the rest is copied elsewhere.
 692       if (dest_addr + words > target_end) {
 693         assert(source_next != NULL, "source_next is NULL when splitting");
 694         *source_next = summarize_split_space(cur_region, split_info, dest_addr,
 695                                              target_end, target_next);
 696         return false;
 697       }
 698 
 699       // Compute the destination_count for cur_region, and if necessary, update
 700       // source_region for a destination region.  The source_region field is
 701       // updated if cur_region is the first (left-most) region to be copied to a
 702       // destination region.
 703       //
 704       // The destination_count calculation is a bit subtle.  A region that has
 705       // data that compacts into itself does not count itself as a destination.
 706       // This maintains the invariant that a zero count means the region is
 707       // available and can be claimed and then filled.
 708       uint destination_count = 0;
 709       if (split_info.is_split(cur_region)) {
 710         // The current region has been split:  the partial object will be copied
 711         // to one destination space and the remaining data will be copied to
 712         // another destination space.  Adjust the initial destination_count and,
 713         // if necessary, set the source_region field if the partial object will
 714         // cross a destination region boundary.
 715         destination_count = split_info.destination_count();
 716         if (destination_count == 2) {
 717           size_t dest_idx = addr_to_region_idx(split_info.dest_region_addr());
 718           _region_data[dest_idx].set_source_region(cur_region);
 719         }
 720       }
 721 
 722       HeapWord* const last_addr = dest_addr + words - 1;
 723       const size_t dest_region_1 = addr_to_region_idx(dest_addr);
 724       const size_t dest_region_2 = addr_to_region_idx(last_addr);
 725 
 726       // Initially assume that the destination regions will be the same and
 727       // adjust the value below if necessary.  Under this assumption, if
 728       // cur_region == dest_region_2, then cur_region will be compacted
 729       // completely into itself.
 730       destination_count += cur_region == dest_region_2 ? 0 : 1;
 731       if (dest_region_1 != dest_region_2) {
 732         // Destination regions differ; adjust destination_count.
 733         destination_count += 1;
 734         // Data from cur_region will be copied to the start of dest_region_2.
 735         _region_data[dest_region_2].set_source_region(cur_region);
 736       } else if (region_offset(dest_addr) == 0) {
 737         // Data from cur_region will be copied to the start of the destination
 738         // region.
 739         _region_data[dest_region_1].set_source_region(cur_region);
 740       }
 741 
 742       _region_data[cur_region].set_destination_count(destination_count);
 743       _region_data[cur_region].set_data_location(region_to_addr(cur_region));
 744       dest_addr += words;
 745     }
 746 
 747     ++cur_region;
 748   }
 749 
 750   *target_next = dest_addr;
 751   return true;
 752 }
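// Example of the destination_count bookkeeping above: if a source region's
// live data straddles one destination region boundary, its destination_count
// is 2 when neither destination region is the source region itself; if the
// data ends in the source region's own space (cur_region == dest_region_2),
// only the other destination region is counted, keeping the invariant that a
// count of zero means the region can be claimed and filled.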
 753 
 754 HeapWord* ParallelCompactData::calc_new_pointer(HeapWord* addr) {
 755   assert(addr != NULL, "Should detect NULL oop earlier");
 756   assert(ParallelScavengeHeap::heap()->is_in(addr), "not in heap");
 757   assert(PSParallelCompact::mark_bitmap()->is_marked(addr), "not marked");
 758 
 759   // Region covering the object.
 760   RegionData* const region_ptr = addr_to_region_ptr(addr);
 761   HeapWord* result = region_ptr->destination();
 762 
 763   // If the entire Region is live, the new location is region->destination + the
  764   // offset of the object within the Region.
 765 
 766   // Run some performance tests to determine if this special case pays off.  It
 767   // is worth it for pointers into the dense prefix.  If the optimization to
 768   // avoid pointer updates in regions that only point to the dense prefix is
 769   // ever implemented, this should be revisited.
 770   if (region_ptr->data_size() == RegionSize) {
 771     result += region_offset(addr);
 772     return result;
 773   }
 774 
 775   // Otherwise, the new location is region->destination + block offset + the
 776   // number of live words in the Block that are (a) to the left of addr and (b)
 777   // due to objects that start in the Block.
 778 
 779   // Fill in the block table if necessary.  This is unsynchronized, so multiple
 780   // threads may fill the block table for a region (harmless, since it is
 781   // idempotent).
 782   if (!region_ptr->blocks_filled()) {
 783     PSParallelCompact::fill_blocks(addr_to_region_idx(addr));
 784     region_ptr->set_blocks_filled();
 785   }
 786 
 787   HeapWord* const search_start = block_align_down(addr);
 788   const size_t block_offset = addr_to_block_ptr(addr)->offset();
 789 
 790   const ParMarkBitMap* bitmap = PSParallelCompact::mark_bitmap();
 791   const size_t live = bitmap->live_words_in_range(search_start, oop(addr));
 792   result += block_offset + live;
 793   DEBUG_ONLY(PSParallelCompact::check_new_location(addr, result));
 794   return result;
 795 }
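// Example: for an addr in a partially live region with destination D, if the
// block table records 3000 live words in the region before addr's block and
// the bitmap counts 40 live words between the block start and addr, the new
// location is D + 3000 + 40.  The live_words_in_range() call above is the hot
// path this change targets by caching its result and updating it
// incrementally (see the change summary at the top).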
 796 
 797 #ifdef ASSERT
 798 void ParallelCompactData::verify_clear(const PSVirtualSpace* vspace)
 799 {
 800   const size_t* const beg = (const size_t*)vspace->committed_low_addr();
 801   const size_t* const end = (const size_t*)vspace->committed_high_addr();
 802   for (const size_t* p = beg; p < end; ++p) {
 803     assert(*p == 0, "not zero");
 804   }
 805 }
 806 
 807 void ParallelCompactData::verify_clear()
 808 {
 809   verify_clear(_region_vspace);
 810   verify_clear(_block_vspace);
 811 }
 812 #endif  // #ifdef ASSERT
 813 
 814 STWGCTimer          PSParallelCompact::_gc_timer;
 815 ParallelOldTracer   PSParallelCompact::_gc_tracer;
 816 elapsedTimer        PSParallelCompact::_accumulated_time;
 817 unsigned int        PSParallelCompact::_total_invocations = 0;
 818 unsigned int        PSParallelCompact::_maximum_compaction_gc_num = 0;
 819 jlong               PSParallelCompact::_time_of_last_gc = 0;
 820 CollectorCounters*  PSParallelCompact::_counters = NULL;
 821 ParMarkBitMap       PSParallelCompact::_mark_bitmap;
 822 ParallelCompactData PSParallelCompact::_summary_data;
 823 
 824 PSParallelCompact::IsAliveClosure PSParallelCompact::_is_alive_closure;
 825 
 826 bool PSParallelCompact::IsAliveClosure::do_object_b(oop p) { return mark_bitmap()->is_marked(p); }
 827 
 828 PSParallelCompact::AdjustPointerClosure PSParallelCompact::_adjust_pointer_closure;
 829 PSParallelCompact::AdjustKlassClosure PSParallelCompact::_adjust_klass_closure;
 830 
 831 void PSParallelCompact::AdjustKlassClosure::do_klass(Klass* klass) {
 832   klass->oops_do(&PSParallelCompact::_adjust_pointer_closure);
 833 }
 834 
 835 void PSParallelCompact::post_initialize() {
 836   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
 837   MemRegion mr = heap->reserved_region();
 838   _ref_processor =
 839     new ReferenceProcessor(mr,            // span
 840                            ParallelRefProcEnabled && (ParallelGCThreads > 1), // mt processing
 841                            ParallelGCThreads, // mt processing degree
 842                            true,              // mt discovery
 843                            ParallelGCThreads, // mt discovery degree
 844                            true,              // atomic_discovery
 845                            &_is_alive_closure); // non-header is alive closure
 846   _counters = new CollectorCounters("PSParallelCompact", 1);
 847 
 848   // Initialize static fields in ParCompactionManager.
 849   ParCompactionManager::initialize(mark_bitmap());
 850 }
 851 
 852 bool PSParallelCompact::initialize() {
 853   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
 854   MemRegion mr = heap->reserved_region();
 855 
  856   // Was the old gen allocated successfully?
 857   if (!heap->old_gen()->is_allocated()) {
 858     return false;
 859   }
 860 
 861   initialize_space_info();
 862   initialize_dead_wood_limiter();
 863 
 864   if (!_mark_bitmap.initialize(mr)) {
 865     vm_shutdown_during_initialization(
 866       err_msg("Unable to allocate " SIZE_FORMAT "KB bitmaps for parallel "
 867       "garbage collection for the requested " SIZE_FORMAT "KB heap.",
 868       _mark_bitmap.reserved_byte_size()/K, mr.byte_size()/K));
 869     return false;
 870   }
 871 
 872   if (!_summary_data.initialize(mr)) {
 873     vm_shutdown_during_initialization(
 874       err_msg("Unable to allocate " SIZE_FORMAT "KB card tables for parallel "
 875       "garbage collection for the requested " SIZE_FORMAT "KB heap.",
 876       _summary_data.reserved_byte_size()/K, mr.byte_size()/K));
 877     return false;
 878   }
 879 
 880   return true;
 881 }
 882 
 883 void PSParallelCompact::initialize_space_info()
 884 {
 885   memset(&_space_info, 0, sizeof(_space_info));
 886 
 887   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
 888   PSYoungGen* young_gen = heap->young_gen();
 889 
 890   _space_info[old_space_id].set_space(heap->old_gen()->object_space());
 891   _space_info[eden_space_id].set_space(young_gen->eden_space());
 892   _space_info[from_space_id].set_space(young_gen->from_space());
 893   _space_info[to_space_id].set_space(young_gen->to_space());
 894 
 895   _space_info[old_space_id].set_start_array(heap->old_gen()->start_array());
 896 }
 897 
 898 void PSParallelCompact::initialize_dead_wood_limiter()
 899 {
 900   const size_t max = 100;
 901   _dwl_mean = double(MIN2(ParallelOldDeadWoodLimiterMean, max)) / 100.0;
 902   _dwl_std_dev = double(MIN2(ParallelOldDeadWoodLimiterStdDev, max)) / 100.0;
 903   _dwl_first_term = 1.0 / (sqrt(2.0 * M_PI) * _dwl_std_dev);
 904   DEBUG_ONLY(_dwl_initialized = true;)
 905   _dwl_adjustment = normal_distribution(1.0);
 906 }
 907 
 908 void
 909 PSParallelCompact::clear_data_covering_space(SpaceId id)
 910 {
 911   // At this point, top is the value before GC, new_top() is the value that will
 912   // be set at the end of GC.  The marking bitmap is cleared to top; nothing
 913   // should be marked above top.  The summary data is cleared to the larger of
 914   // top & new_top.
 915   MutableSpace* const space = _space_info[id].space();
 916   HeapWord* const bot = space->bottom();
 917   HeapWord* const top = space->top();
 918   HeapWord* const max_top = MAX2(top, _space_info[id].new_top());
 919 
 920   const idx_t beg_bit = _mark_bitmap.addr_to_bit(bot);
 921   const idx_t end_bit = BitMap::word_align_up(_mark_bitmap.addr_to_bit(top));
 922   _mark_bitmap.clear_range(beg_bit, end_bit);
 923 
 924   const size_t beg_region = _summary_data.addr_to_region_idx(bot);
 925   const size_t end_region =
 926     _summary_data.addr_to_region_idx(_summary_data.region_align_up(max_top));
 927   _summary_data.clear_range(beg_region, end_region);
 928 
 929   // Clear the data used to 'split' regions.
 930   SplitInfo& split_info = _space_info[id].split_info();
 931   if (split_info.is_valid()) {
 932     split_info.clear();
 933   }
 934   DEBUG_ONLY(split_info.verify_clear();)
 935 }
 936 
 937 void PSParallelCompact::pre_compact()
 938 {
 939   // Update the from & to space pointers in space_info, since they are swapped
 940   // at each young gen gc.  Do the update unconditionally (even though a
 941   // promotion failure does not swap spaces) because an unknown number of young
 942   // collections will have swapped the spaces an unknown number of times.
 943   GCTraceTime(Trace, gc, phases) tm("Pre Compact", &_gc_timer);
 944   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
 945   _space_info[from_space_id].set_space(heap->young_gen()->from_space());
 946   _space_info[to_space_id].set_space(heap->young_gen()->to_space());
 947 
 948   DEBUG_ONLY(add_obj_count = add_obj_size = 0;)
 949   DEBUG_ONLY(mark_bitmap_count = mark_bitmap_size = 0;)
 950 
 951   // Increment the invocation count
 952   heap->increment_total_collections(true);
 953 
 954   // We need to track unique mark sweep invocations as well.
 955   _total_invocations++;
 956 
 957   heap->print_heap_before_gc();
 958   heap->trace_heap_before_gc(&_gc_tracer);
 959 
 960   // Fill in TLABs
 961   heap->accumulate_statistics_all_tlabs();
 962   heap->ensure_parsability(true);  // retire TLABs
 963 
 964   if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
 965     HandleMark hm;  // Discard invalid handles created during verification
 966     Universe::verify("Before GC");
 967   }
 968 
 969   // Verify object start arrays
 970   if (VerifyObjectStartArray &&
 971       VerifyBeforeGC) {
 972     heap->old_gen()->verify_object_start_array();
 973   }
 974 
 975   DEBUG_ONLY(mark_bitmap()->verify_clear();)
 976   DEBUG_ONLY(summary_data().verify_clear();)
 977 
 978   // Have worker threads release resources the next time they run a task.
 979   gc_task_manager()->release_all_resources();
 980 }
 981 
 982 void PSParallelCompact::post_compact()
 983 {
 984   GCTraceTime(Trace, gc, phases) tm("Post Compact", &_gc_timer);
 985 
 986   for (unsigned int id = old_space_id; id < last_space_id; ++id) {
 987     // Clear the marking bitmap, summary data and split info.
 988     clear_data_covering_space(SpaceId(id));
 989     // Update top().  Must be done after clearing the bitmap and summary data.
 990     _space_info[id].publish_new_top();
 991   }
 992 
 993   MutableSpace* const eden_space = _space_info[eden_space_id].space();
 994   MutableSpace* const from_space = _space_info[from_space_id].space();
 995   MutableSpace* const to_space   = _space_info[to_space_id].space();
 996 
 997   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
 998   bool eden_empty = eden_space->is_empty();
 999   if (!eden_empty) {
1000     eden_empty = absorb_live_data_from_eden(heap->size_policy(),
1001                                             heap->young_gen(), heap->old_gen());
1002   }
1003 
1004   // Update heap occupancy information which is used as input to the soft ref
1005   // clearing policy at the next gc.
1006   Universe::update_heap_info_at_gc();
1007 
1008   bool young_gen_empty = eden_empty && from_space->is_empty() &&
1009     to_space->is_empty();
1010 
1011   ModRefBarrierSet* modBS = barrier_set_cast<ModRefBarrierSet>(heap->barrier_set());
1012   MemRegion old_mr = heap->old_gen()->reserved();
1013   if (young_gen_empty) {
1014     modBS->clear(MemRegion(old_mr.start(), old_mr.end()));
1015   } else {
1016     modBS->invalidate(MemRegion(old_mr.start(), old_mr.end()));
1017   }
1018 
1019   // Delete metaspaces for unloaded class loaders and clean up loader_data graph
1020   ClassLoaderDataGraph::purge();
1021   MetaspaceAux::verify_metrics();
1022 
1023   CodeCache::gc_epilogue();
1024   JvmtiExport::gc_epilogue();
1025 
1026 #if defined(COMPILER2) || INCLUDE_JVMCI
1027   DerivedPointerTable::update_pointers();
1028 #endif
1029 
1030   ref_processor()->enqueue_discovered_references(NULL);
1031 
1032   if (ZapUnusedHeapArea) {
1033     heap->gen_mangle_unused_area();
1034   }
1035 
1036   // Update time of last GC
1037   reset_millis_since_last_gc();
1038 }
1039 
1040 HeapWord*
1041 PSParallelCompact::compute_dense_prefix_via_density(const SpaceId id,
1042                                                     bool maximum_compaction)
1043 {
1044   const size_t region_size = ParallelCompactData::RegionSize;
1045   const ParallelCompactData& sd = summary_data();
1046 
1047   const MutableSpace* const space = _space_info[id].space();
1048   HeapWord* const top_aligned_up = sd.region_align_up(space->top());
1049   const RegionData* const beg_cp = sd.addr_to_region_ptr(space->bottom());
1050   const RegionData* const end_cp = sd.addr_to_region_ptr(top_aligned_up);
1051 
1052   // Skip full regions at the beginning of the space--they are necessarily part
1053   // of the dense prefix.
1054   size_t full_count = 0;
1055   const RegionData* cp;
1056   for (cp = beg_cp; cp < end_cp && cp->data_size() == region_size; ++cp) {
1057     ++full_count;
1058   }
1059 
1060   assert(total_invocations() >= _maximum_compaction_gc_num, "sanity");
1061   const size_t gcs_since_max = total_invocations() - _maximum_compaction_gc_num;
1062   const bool interval_ended = gcs_since_max > HeapMaximumCompactionInterval;
1063   if (maximum_compaction || cp == end_cp || interval_ended) {
1064     _maximum_compaction_gc_num = total_invocations();
1065     return sd.region_to_addr(cp);
1066   }
1067 
1068   HeapWord* const new_top = _space_info[id].new_top();
1069   const size_t space_live = pointer_delta(new_top, space->bottom());
1070   const size_t space_used = space->used_in_words();
1071   const size_t space_capacity = space->capacity_in_words();
1072 
1073   const double cur_density = double(space_live) / space_capacity;
1074   const double deadwood_density =
1075     (1.0 - cur_density) * (1.0 - cur_density) * cur_density * cur_density;
1076   const size_t deadwood_goal = size_t(space_capacity * deadwood_density);
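  // Example: at cur_density == 0.5 the factor (1 - d)^2 * d^2 reaches its
  // maximum of 0.0625, so up to 6.25% of the space's capacity is accepted as
  // dead wood in the dense prefix; at cur_density == 0.9 it is only 0.0081.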
1077 
1078   if (TraceParallelOldGCDensePrefix) {
1079     tty->print_cr("cur_dens=%5.3f dw_dens=%5.3f dw_goal=" SIZE_FORMAT,
1080                   cur_density, deadwood_density, deadwood_goal);
1081     tty->print_cr("space_live=" SIZE_FORMAT " " "space_used=" SIZE_FORMAT " "
1082                   "space_cap=" SIZE_FORMAT,
1083                   space_live, space_used,
1084                   space_capacity);
1085   }
1086 
1087   // XXX - Use binary search?
1088   HeapWord* dense_prefix = sd.region_to_addr(cp);
1089   const RegionData* full_cp = cp;
1090   const RegionData* const top_cp = sd.addr_to_region_ptr(space->top() - 1);
1091   while (cp < end_cp) {
1092     HeapWord* region_destination = cp->destination();
1093     const size_t cur_deadwood = pointer_delta(dense_prefix, region_destination);
1094     if (TraceParallelOldGCDensePrefix && Verbose) {
1095       tty->print_cr("c#=" SIZE_FORMAT_W(4) " dst=" PTR_FORMAT " "
1096                     "dp=" PTR_FORMAT " " "cdw=" SIZE_FORMAT_W(8),
1097                     sd.region(cp), p2i(region_destination),
1098                     p2i(dense_prefix), cur_deadwood);
1099     }
1100 
1101     if (cur_deadwood >= deadwood_goal) {
1102       // Found the region that has the correct amount of deadwood to the left.
1103       // This typically occurs after crossing a fairly sparse set of regions, so
1104       // iterate backwards over those sparse regions, looking for the region
1105       // that has the lowest density of live objects 'to the right.'
1106       size_t space_to_left = sd.region(cp) * region_size;
1107       size_t live_to_left = space_to_left - cur_deadwood;
1108       size_t space_to_right = space_capacity - space_to_left;
1109       size_t live_to_right = space_live - live_to_left;
1110       double density_to_right = double(live_to_right) / space_to_right;
1111       while (cp > full_cp) {
1112         --cp;
1113         const size_t prev_region_live_to_right = live_to_right -
1114           cp->data_size();
1115         const size_t prev_region_space_to_right = space_to_right + region_size;
1116         double prev_region_density_to_right =
1117           double(prev_region_live_to_right) / prev_region_space_to_right;
1118         if (density_to_right <= prev_region_density_to_right) {
1119           return dense_prefix;
1120         }
1121         if (TraceParallelOldGCDensePrefix && Verbose) {
1122           tty->print_cr("backing up from c=" SIZE_FORMAT_W(4) " d2r=%10.8f "
1123                         "pc_d2r=%10.8f", sd.region(cp), density_to_right,
1124                         prev_region_density_to_right);
1125         }
1126         dense_prefix -= region_size;
1127         live_to_right = prev_region_live_to_right;
1128         space_to_right = prev_region_space_to_right;
1129         density_to_right = prev_region_density_to_right;
1130       }
1131       return dense_prefix;
1132     }
1133 
1134     dense_prefix += region_size;
1135     ++cp;
1136   }
1137 
1138   return dense_prefix;
1139 }
1140 
1141 #ifndef PRODUCT
1142 void PSParallelCompact::print_dense_prefix_stats(const char* const algorithm,
1143                                                  const SpaceId id,
1144                                                  const bool maximum_compaction,
1145                                                  HeapWord* const addr)
1146 {
1147   const size_t region_idx = summary_data().addr_to_region_idx(addr);
1148   RegionData* const cp = summary_data().region(region_idx);
1149   const MutableSpace* const space = _space_info[id].space();
1150   HeapWord* const new_top = _space_info[id].new_top();
1151 
1152   const size_t space_live = pointer_delta(new_top, space->bottom());
1153   const size_t dead_to_left = pointer_delta(addr, cp->destination());
1154   const size_t space_cap = space->capacity_in_words();
1155   const double dead_to_left_pct = double(dead_to_left) / space_cap;
1156   const size_t live_to_right = new_top - cp->destination();
1157   const size_t dead_to_right = space->top() - addr - live_to_right;
1158 
1159   tty->print_cr("%s=" PTR_FORMAT " dpc=" SIZE_FORMAT_W(5) " "
1160                 "spl=" SIZE_FORMAT " "
1161                 "d2l=" SIZE_FORMAT " d2l%%=%6.4f "
1162                 "d2r=" SIZE_FORMAT " l2r=" SIZE_FORMAT
1163                 " ratio=%10.8f",
1164                 algorithm, p2i(addr), region_idx,
1165                 space_live,
1166                 dead_to_left, dead_to_left_pct,
1167                 dead_to_right, live_to_right,
1168                 double(dead_to_right) / live_to_right);
1169 }
1170 #endif  // #ifndef PRODUCT
1171 
1172 // Return a fraction indicating how much of the generation can be treated as
1173 // "dead wood" (i.e., not reclaimed).  The function uses a normal distribution
1174 // based on the density of live objects in the generation to determine a limit,
1175 // which is then adjusted so the return value is min_percent when the density is
1176 // 1.
1177 //
 1178 // The following table shows some return values for different values of the
1179 // standard deviation (ParallelOldDeadWoodLimiterStdDev); the mean is 0.5 and
1180 // min_percent is 1.
1181 //
1182 //                          fraction allowed as dead wood
1183 //         -----------------------------------------------------------------
1184 // density std_dev=70 std_dev=75 std_dev=80 std_dev=85 std_dev=90 std_dev=95
1185 // ------- ---------- ---------- ---------- ---------- ---------- ----------
1186 // 0.00000 0.01000000 0.01000000 0.01000000 0.01000000 0.01000000 0.01000000
1187 // 0.05000 0.03193096 0.02836880 0.02550828 0.02319280 0.02130337 0.01974941
1188 // 0.10000 0.05247504 0.04547452 0.03988045 0.03537016 0.03170171 0.02869272
1189 // 0.15000 0.07135702 0.06111390 0.05296419 0.04641639 0.04110601 0.03676066
1190 // 0.20000 0.08831616 0.07509618 0.06461766 0.05622444 0.04943437 0.04388975
1191 // 0.25000 0.10311208 0.08724696 0.07471205 0.06469760 0.05661313 0.05002313
1192 // 0.30000 0.11553050 0.09741183 0.08313394 0.07175114 0.06257797 0.05511132
1193 // 0.35000 0.12538832 0.10545958 0.08978741 0.07731366 0.06727491 0.05911289
1194 // 0.40000 0.13253818 0.11128511 0.09459590 0.08132834 0.07066107 0.06199500
1195 // 0.45000 0.13687208 0.11481163 0.09750361 0.08375387 0.07270534 0.06373386
1196 // 0.50000 0.13832410 0.11599237 0.09847664 0.08456518 0.07338887 0.06431510
1197 // 0.55000 0.13687208 0.11481163 0.09750361 0.08375387 0.07270534 0.06373386
1198 // 0.60000 0.13253818 0.11128511 0.09459590 0.08132834 0.07066107 0.06199500
1199 // 0.65000 0.12538832 0.10545958 0.08978741 0.07731366 0.06727491 0.05911289
1200 // 0.70000 0.11553050 0.09741183 0.08313394 0.07175114 0.06257797 0.05511132
1201 // 0.75000 0.10311208 0.08724696 0.07471205 0.06469760 0.05661313 0.05002313
1202 // 0.80000 0.08831616 0.07509618 0.06461766 0.05622444 0.04943437 0.04388975
1203 // 0.85000 0.07135702 0.06111390 0.05296419 0.04641639 0.04110601 0.03676066
1204 // 0.90000 0.05247504 0.04547452 0.03988045 0.03537016 0.03170171 0.02869272
1205 // 0.95000 0.03193096 0.02836880 0.02550828 0.02319280 0.02130337 0.01974941
1206 // 1.00000 0.01000000 0.01000000 0.01000000 0.01000000 0.01000000 0.01000000
1207 
1208 double PSParallelCompact::dead_wood_limiter(double density, size_t min_percent)
1209 {
1210   assert(_dwl_initialized, "uninitialized");
1211 
1212   // The raw limit is the value of the normal distribution at x = density.
1213   const double raw_limit = normal_distribution(density);
1214 
1215   // Adjust the raw limit so it becomes the minimum when the density is 1.
1216   //
1217   // First subtract the adjustment value (which is simply the precomputed value
1218   // normal_distribution(1.0)); this yields a value of 0 when the density is 1.
1219   // Then add the minimum value, so the minimum is returned when the density is
1220   // 1.  Finally, prevent negative values, which occur when the mean is not 0.5.
1221   const double min = double(min_percent) / 100.0;
1222   const double limit = raw_limit - _dwl_adjustment + min;
1223   return MAX2(limit, 0.0);
1224 }
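// In formula form, with normal_distribution(x) evaluating the Gaussian density
// N(x) with mean _dwl_mean and standard deviation _dwl_std_dev (note that
// _dwl_first_term above is 1 / (sqrt(2 * pi) * sigma)):
//
//   limit(d) = MAX2(N(d) - N(1.0) + min_percent / 100.0, 0.0)
//
// The limit peaks at d == _dwl_mean (0.5 by default) and falls to
// min_percent / 100 at d == 1.0, matching the table above.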
1225 
1226 ParallelCompactData::RegionData*
1227 PSParallelCompact::first_dead_space_region(const RegionData* beg,
1228                                            const RegionData* end)
1229 {
1230   const size_t region_size = ParallelCompactData::RegionSize;
1231   ParallelCompactData& sd = summary_data();
1232   size_t left = sd.region(beg);
1233   size_t right = end > beg ? sd.region(end) - 1 : left;
1234 
1235   // Binary search.
1236   while (left < right) {
1237     // Equivalent to (left + right) / 2, but does not overflow.
1238     const size_t middle = left + (right - left) / 2;
1239     RegionData* const middle_ptr = sd.region(middle);
1240     HeapWord* const dest = middle_ptr->destination();
1241     HeapWord* const addr = sd.region_to_addr(middle);
1242     assert(dest != NULL, "sanity");
1243     assert(dest <= addr, "must move left");
1244 
1245     if (middle > left && dest < addr) {
1246       right = middle - 1;
1247     } else if (middle < right && middle_ptr->data_size() == region_size) {
1248       left = middle + 1;
1249     } else {
1250       return middle_ptr;
1251     }
1252   }
1253   return sd.region(left);
1254 }
1255 
1256 ParallelCompactData::RegionData*
1257 PSParallelCompact::dead_wood_limit_region(const RegionData* beg,
1258                                           const RegionData* end,
1259                                           size_t dead_words)
1260 {
1261   ParallelCompactData& sd = summary_data();
1262   size_t left = sd.region(beg);
1263   size_t right = end > beg ? sd.region(end) - 1 : left;
1264 
1265   // Binary search.
1266   while (left < right) {
1267     // Equivalent to (left + right) / 2, but does not overflow.
1268     const size_t middle = left + (right - left) / 2;
1269     RegionData* const middle_ptr = sd.region(middle);
1270     HeapWord* const dest = middle_ptr->destination();
1271     HeapWord* const addr = sd.region_to_addr(middle);
1272     assert(dest != NULL, "sanity");
1273     assert(dest <= addr, "must move left");
1274 
1275     const size_t dead_to_left = pointer_delta(addr, dest);
1276     if (middle > left && dead_to_left > dead_words) {
1277       right = middle - 1;
1278     } else if (middle < right && dead_to_left < dead_words) {
1279       left = middle + 1;
1280     } else {
1281       return middle_ptr;
1282     }
1283   }
1284   return sd.region(left);
1285 }
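
// The search above relies on a monotonicity property: destinations never
// move to the right (dest <= addr), and a region's destination advances by
// at most RegionSize (its live data) while its start address advances by
// exactly RegionSize, so the dead space to the left never decreases with the
// region index.  Below is a minimal sketch of the same search over a plain
// array, assuming the cumulative dead words per region have been precomputed
// into a hypothetical dead_to_left[] array; it is illustration only and is
// not used by the collector.
static size_t example_dead_wood_search(const size_t* dead_to_left,
                                       size_t region_count,
                                       size_t target_dead_words) {
  size_t left = 0;
  size_t right = region_count;                        // exclusive upper bound
  while (left < right) {
    const size_t middle = left + (right - left) / 2;  // no overflow
    if (dead_to_left[middle] < target_dead_words) {
      left = middle + 1;             // the limit region is to the right
    } else {
      right = middle;                // middle itself may be the limit region
    }
  }
  return left;  // == region_count if no region reaches the target
}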
1286 
1287 // The result is valid during the summary phase, after the initial summarization
1288 // of each space into itself, and before final summarization.
1289 inline double
1290 PSParallelCompact::reclaimed_ratio(const RegionData* const cp,
1291                                    HeapWord* const bottom,
1292                                    HeapWord* const top,
1293                                    HeapWord* const new_top)
1294 {
1295   ParallelCompactData& sd = summary_data();
1296 
1297   assert(cp != NULL, "sanity");
1298   assert(bottom != NULL, "sanity");
1299   assert(top != NULL, "sanity");
1300   assert(new_top != NULL, "sanity");
1301   assert(top >= new_top, "summary data problem?");
1302   assert(new_top > bottom, "space is empty; should not be here");
1303   assert(new_top >= cp->destination(), "sanity");
1304   assert(top >= sd.region_to_addr(cp), "sanity");
1305 
1306   HeapWord* const destination = cp->destination();
1307   const size_t dense_prefix_live  = pointer_delta(destination, bottom);
1308   const size_t compacted_region_live = pointer_delta(new_top, destination);
1309   const size_t compacted_region_used = pointer_delta(top,
1310                                                      sd.region_to_addr(cp));
1311   const size_t reclaimable = compacted_region_used - compacted_region_live;
1312 
1313   const double divisor = dense_prefix_live + 1.25 * compacted_region_live;
1314   return double(reclaimable) / divisor;
1315 }
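
// Worked example with illustrative numbers: if 2,000 live words compact
// below the candidate region's destination (dense_prefix_live), 10,000 live
// words compact at or above it (compacted_region_live), and 16,000 words are
// in use from the region's start address up to top (compacted_region_used),
// then reclaimable = 16,000 - 10,000 = 6,000 and the ratio is
// 6,000 / (2,000 + 1.25 * 10,000) ~= 0.41.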
1316 
1317 // Return the address of the end of the dense prefix, a.k.a. the start of the
1318 // compacted region.  The address is always on a region boundary.
1319 //
1320 // Completely full regions at the left are skipped, since no compaction can
1321 // occur in those regions.  Then the maximum amount of dead wood to allow is
1322 // computed, based on the density (amount live / capacity) of the generation;
1323 // the region with approximately that amount of dead space to the left is
1324 // identified as the limit region.  Regions between the last completely full
1325 // region and the limit region are scanned and the one that has the best
1326 // (maximum) reclaimed_ratio() is selected.
1327 HeapWord*
1328 PSParallelCompact::compute_dense_prefix(const SpaceId id,
1329                                         bool maximum_compaction)
1330 {
1331   const size_t region_size = ParallelCompactData::RegionSize;
1332   const ParallelCompactData& sd = summary_data();
1333 
1334   const MutableSpace* const space = _space_info[id].space();
1335   HeapWord* const top = space->top();
1336   HeapWord* const top_aligned_up = sd.region_align_up(top);
1337   HeapWord* const new_top = _space_info[id].new_top();
1338   HeapWord* const new_top_aligned_up = sd.region_align_up(new_top);
1339   HeapWord* const bottom = space->bottom();
1340   const RegionData* const beg_cp = sd.addr_to_region_ptr(bottom);
1341   const RegionData* const top_cp = sd.addr_to_region_ptr(top_aligned_up);
1342   const RegionData* const new_top_cp =
1343     sd.addr_to_region_ptr(new_top_aligned_up);
1344 
1345   // Skip full regions at the beginning of the space--they are necessarily part
1346   // of the dense prefix.
1347   const RegionData* const full_cp = first_dead_space_region(beg_cp, new_top_cp);
1348   assert(full_cp->destination() == sd.region_to_addr(full_cp) ||
1349          space->is_empty(), "no dead space allowed to the left");
1350   assert(full_cp->data_size() < region_size || full_cp == new_top_cp - 1,
1351          "region must have dead space");
1352 
1353   // The gc number is saved whenever a maximum compaction is done, and used to
1354   // determine when the maximum compaction interval has expired.  This avoids
1355   // successive max compactions for different reasons.
1356   assert(total_invocations() >= _maximum_compaction_gc_num, "sanity");
1357   const size_t gcs_since_max = total_invocations() - _maximum_compaction_gc_num;
1358   const bool interval_ended = gcs_since_max > HeapMaximumCompactionInterval ||
1359     total_invocations() == HeapFirstMaximumCompactionCount;
1360   if (maximum_compaction || full_cp == top_cp || interval_ended) {
1361     _maximum_compaction_gc_num = total_invocations();
1362     return sd.region_to_addr(full_cp);
1363   }
1364 
1365   const size_t space_live = pointer_delta(new_top, bottom);
1366   const size_t space_used = space->used_in_words();
1367   const size_t space_capacity = space->capacity_in_words();
1368 
1369   const double density = double(space_live) / double(space_capacity);
1370   const size_t min_percent_free = MarkSweepDeadRatio;
1371   const double limiter = dead_wood_limiter(density, min_percent_free);
1372   const size_t dead_wood_max = space_used - space_live;
1373   const size_t dead_wood_limit = MIN2(size_t(space_capacity * limiter),
1374                                       dead_wood_max);
1375 
1376   if (TraceParallelOldGCDensePrefix) {
1377     tty->print_cr("space_live=" SIZE_FORMAT " " "space_used=" SIZE_FORMAT " "
1378                   "space_cap=" SIZE_FORMAT,
1379                   space_live, space_used,
1380                   space_capacity);
1381     tty->print_cr("dead_wood_limiter(%6.4f, " SIZE_FORMAT ")=%6.4f "
1382                   "dead_wood_max=" SIZE_FORMAT " dead_wood_limit=" SIZE_FORMAT,
1383                   density, min_percent_free, limiter,
1384                   dead_wood_max, dead_wood_limit);
1385   }
1386 
1387   // Locate the region with the desired amount of dead space to the left.
1388   const RegionData* const limit_cp =
1389     dead_wood_limit_region(full_cp, top_cp, dead_wood_limit);
1390 
1391   // Scan from the first region with dead space to the limit region and find the
1392   // one with the best (largest) reclaimed ratio.
1393   double best_ratio = 0.0;
1394   const RegionData* best_cp = full_cp;
1395   for (const RegionData* cp = full_cp; cp < limit_cp; ++cp) {
1396     double tmp_ratio = reclaimed_ratio(cp, bottom, top, new_top);
1397     if (tmp_ratio > best_ratio) {
1398       best_cp = cp;
1399       best_ratio = tmp_ratio;
1400     }
1401   }
1402 
1403   return sd.region_to_addr(best_cp);
1404 }
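
// Worked example with illustrative numbers: for a 1,000,000-word space with
// 500,000 live words and 600,000 used words, density is 0.5 and
// dead_wood_max is 100,000.  For the std_dev=80 column of the table above,
// the limiter at density 0.5 is ~0.0985 with min_percent == 1; if
// MarkSweepDeadRatio is 5, that becomes ~0.0985 - 0.01 + 0.05 = ~0.1385, so
// dead_wood_limit = MIN2(~138,500, 100,000) = 100,000 words of dead wood may
// be left in the dense prefix.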
1405 
1406 void PSParallelCompact::summarize_spaces_quick()
1407 {
1408   for (unsigned int i = 0; i < last_space_id; ++i) {
1409     const MutableSpace* space = _space_info[i].space();
1410     HeapWord** nta = _space_info[i].new_top_addr();
1411     bool result = _summary_data.summarize(_space_info[i].split_info(),
1412                                           space->bottom(), space->top(), NULL,
1413                                           space->bottom(), space->end(), nta);
1414     assert(result, "space must fit into itself");
1415     _space_info[i].set_dense_prefix(space->bottom());
1416   }
1417 }
1418 
1419 void PSParallelCompact::fill_dense_prefix_end(SpaceId id)
1420 {
1421   HeapWord* const dense_prefix_end = dense_prefix(id);
1422   const RegionData* region = _summary_data.addr_to_region_ptr(dense_prefix_end);
1423   const idx_t dense_prefix_bit = _mark_bitmap.addr_to_bit(dense_prefix_end);
1424   if (dead_space_crosses_boundary(region, dense_prefix_bit)) {
1425     // Only enough dead space is filled so that any remaining dead space to the
1426     // left is larger than the minimum filler object.  (The remainder is filled
1427     // during the copy/update phase.)
1428     //
1429     // The size of the dead space to the right of the boundary is not a
1430     // concern, since compaction will be able to use whatever space is
1431     // available.
1432     //
1433     // Here '||' is the boundary, 'x' represents a don't care bit and a box
1434     // surrounds the space to be filled with an object.
1435     //
1436     // In the 32-bit VM, each bit represents two 32-bit words:
1437     //                              +---+
1438     // a) beg_bits:  ...  x   x   x | 0 | ||   0   x  x  ...
1439     //    end_bits:  ...  x   x   x | 0 | ||   0   x  x  ...
1440     //                              +---+
1441     //
1442     // In the 64-bit VM, each bit represents one 64-bit word:
1443     //                              +------------+
1444     // b) beg_bits:  ...  x   x   x | 0   ||   0 | x  x  ...
1445     //    end_bits:  ...  x   x   1 | 0   ||   0 | x  x  ...
1446     //                              +------------+
1447     //                          +-------+
1448     // c) beg_bits:  ...  x   x | 0   0 | ||   0   x  x  ...
1449     //    end_bits:  ...  x   1 | 0   0 | ||   0   x  x  ...
1450     //                          +-------+
1451     //                      +-----------+
1452     // d) beg_bits:  ...  x | 0   0   0 | ||   0   x  x  ...
1453     //    end_bits:  ...  1 | 0   0   0 | ||   0   x  x  ...
1454     //                      +-----------+
1455     //                          +-------+
1456     // e) beg_bits:  ...  0   0 | 0   0 | ||   0   x  x  ...
1457     //    end_bits:  ...  0   0 | 0   0 | ||   0   x  x  ...
1458     //                          +-------+
1459 
1460     // Initially assume case a, c or e will apply.
1461     size_t obj_len = CollectedHeap::min_fill_size();
1462     HeapWord* obj_beg = dense_prefix_end - obj_len;
1463 
1464 #ifdef  _LP64
1465     if (MinObjAlignment > 1) { // object alignment > heap word size
1466       // Cases a, c or e.
1467     } else if (_mark_bitmap.is_obj_end(dense_prefix_bit - 2)) {
1468       // Case b above.
1469       obj_beg = dense_prefix_end - 1;
1470     } else if (!_mark_bitmap.is_obj_end(dense_prefix_bit - 3) &&
1471                _mark_bitmap.is_obj_end(dense_prefix_bit - 4)) {
1472       // Case d above.
1473       obj_beg = dense_prefix_end - 3;
1474       obj_len = 3;
1475     }
1476 #endif  // #ifdef _LP64
1477 
1478     CollectedHeap::fill_with_object(obj_beg, obj_len);
1479     _mark_bitmap.mark_obj(obj_beg, obj_len);
1480     _summary_data.add_obj(obj_beg, obj_len);
1481     assert(start_array(id) != NULL, "sanity");
1482     start_array(id)->allocate_block(obj_beg);
1483   }
1484 }
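
// In the diagrams above, the filler keeps its minimum size in cases a, c and
// e.  In case b the last live object ends one word before the boundary, so
// the filler is shifted right to start at that single dead word and spills
// past the boundary into the dead space on the right (which, as noted above,
// is not a concern).  In case d three dead words precede the boundary;
// assuming the two-word minimum filler shown in the diagrams, a filler
// ending at the boundary would strand one dead word to its left, too small
// to fill later, so a three-word filler is used instead.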
1485 
1486 void
1487 PSParallelCompact::clear_source_region(HeapWord* beg_addr, HeapWord* end_addr)
1488 {
1489   RegionData* const beg_ptr = _summary_data.addr_to_region_ptr(beg_addr);
1490   HeapWord* const end_aligned_up = _summary_data.region_align_up(end_addr);
1491   RegionData* const end_ptr = _summary_data.addr_to_region_ptr(end_aligned_up);
1492   for (RegionData* cur = beg_ptr; cur < end_ptr; ++cur) {
1493     cur->set_source_region(0);
1494   }
1495 }
1496 
1497 void
1498 PSParallelCompact::summarize_space(SpaceId id, bool maximum_compaction)
1499 {
1500   assert(id < last_space_id, "id out of range");
1501   assert(_space_info[id].dense_prefix() == _space_info[id].space()->bottom(),
1502          "should have been reset in summarize_spaces_quick()");
1503 
1504   const MutableSpace* space = _space_info[id].space();
1505   if (_space_info[id].new_top() != space->bottom()) {
1506     HeapWord* dense_prefix_end = compute_dense_prefix(id, maximum_compaction);
1507     _space_info[id].set_dense_prefix(dense_prefix_end);
1508 
1509 #ifndef PRODUCT
1510     if (TraceParallelOldGCDensePrefix) {
1511       print_dense_prefix_stats("ratio", id, maximum_compaction,
1512                                dense_prefix_end);
1513       HeapWord* addr = compute_dense_prefix_via_density(id, maximum_compaction);
1514       print_dense_prefix_stats("density", id, maximum_compaction, addr);
1515     }
1516 #endif  // #ifndef PRODUCT
1517 
1518     // Recompute the summary data, taking into account the dense prefix.  If
1519     // every last byte will be reclaimed, then the existing summary data which
1520     // compacts everything can be left in place.
1521     if (!maximum_compaction && dense_prefix_end != space->bottom()) {
1522       // If dead space crosses the dense prefix boundary, it is (at least
1523       // partially) filled with a dummy object, marked live and added to the
1524       // summary data.  This simplifies the copy/update phase and must be done
1525       // before the final locations of objects are determined, to prevent
1526       // leaving a fragment of dead space that is too small to fill.
1527       fill_dense_prefix_end(id);
1528 
1529       // Compute the destination of each Region, and thus each object.
1530       _summary_data.summarize_dense_prefix(space->bottom(), dense_prefix_end);
1531       _summary_data.summarize(_space_info[id].split_info(),
1532                               dense_prefix_end, space->top(), NULL,
1533                               dense_prefix_end, space->end(),
1534                               _space_info[id].new_top_addr());
1535     }
1536   }
1537 
1538   if (develop_log_is_enabled(Trace, gc, compaction, phases)) {
1539     const size_t region_size = ParallelCompactData::RegionSize;
1540     HeapWord* const dense_prefix_end = _space_info[id].dense_prefix();
1541     const size_t dp_region = _summary_data.addr_to_region_idx(dense_prefix_end);
1542     const size_t dp_words = pointer_delta(dense_prefix_end, space->bottom());
1543     HeapWord* const new_top = _space_info[id].new_top();
1544     const HeapWord* nt_aligned_up = _summary_data.region_align_up(new_top);
1545     const size_t cr_words = pointer_delta(nt_aligned_up, dense_prefix_end);
1546     log_develop_trace(gc, compaction, phases)(
1547         "id=%d cap=" SIZE_FORMAT " dp=" PTR_FORMAT " "
1548         "dp_region=" SIZE_FORMAT " " "dp_count=" SIZE_FORMAT " "
1549         "cr_count=" SIZE_FORMAT " " "nt=" PTR_FORMAT,
1550         id, space->capacity_in_words(), p2i(dense_prefix_end),
1551         dp_region, dp_words / region_size,
1552         cr_words / region_size, p2i(new_top));
1553   }
1554 }
1555 
1556 #ifndef PRODUCT
1557 void PSParallelCompact::summary_phase_msg(SpaceId dst_space_id,
1558                                           HeapWord* dst_beg, HeapWord* dst_end,
1559                                           SpaceId src_space_id,
1560                                           HeapWord* src_beg, HeapWord* src_end)
1561 {
1562   log_develop_trace(gc, compaction, phases)(
1563       "Summarizing %d [%s] into %d [%s]:  "
1564       "src=" PTR_FORMAT "-" PTR_FORMAT " "
1565       SIZE_FORMAT "-" SIZE_FORMAT " "
1566       "dst=" PTR_FORMAT "-" PTR_FORMAT " "
1567       SIZE_FORMAT "-" SIZE_FORMAT,
1568       src_space_id, space_names[src_space_id],
1569       dst_space_id, space_names[dst_space_id],
1570       p2i(src_beg), p2i(src_end),
1571       _summary_data.addr_to_region_idx(src_beg),
1572       _summary_data.addr_to_region_idx(src_end),
1573       p2i(dst_beg), p2i(dst_end),
1574       _summary_data.addr_to_region_idx(dst_beg),
1575       _summary_data.addr_to_region_idx(dst_end));
1576 }
1577 #endif  // #ifndef PRODUCT
1578 
1579 void PSParallelCompact::summary_phase(ParCompactionManager* cm,
1580                                       bool maximum_compaction)
1581 {
1582   GCTraceTime(Trace, gc, phases) tm("Summary Phase", &_gc_timer);
1583 
1584 #ifdef  ASSERT
1585   if (TraceParallelOldGCMarkingPhase) {
1586     tty->print_cr("add_obj_count=" SIZE_FORMAT " "
1587                   "add_obj_bytes=" SIZE_FORMAT,
1588                   add_obj_count, add_obj_size * HeapWordSize);
1589     tty->print_cr("mark_bitmap_count=" SIZE_FORMAT " "
1590                   "mark_bitmap_bytes=" SIZE_FORMAT,
1591                   mark_bitmap_count, mark_bitmap_size * HeapWordSize);
1592   }
1593 #endif  // #ifdef ASSERT
1594 
1595   // Quick summarization of each space into itself, to see how much is live.
1596   summarize_spaces_quick();
1597 
1598   log_develop_trace(gc, compaction, phases)("summary phase:  after summarizing each space to self");
1599   NOT_PRODUCT(print_region_ranges());
1600   NOT_PRODUCT(print_initial_summary_data(_summary_data, _space_info));
1601 
1602   // The amount of live data that will end up in old space (assuming it fits).
1603   size_t old_space_total_live = 0;
1604   for (unsigned int id = old_space_id; id < last_space_id; ++id) {
1605     old_space_total_live += pointer_delta(_space_info[id].new_top(),
1606                                           _space_info[id].space()->bottom());
1607   }
1608 
1609   MutableSpace* const old_space = _space_info[old_space_id].space();
1610   const size_t old_capacity = old_space->capacity_in_words();
1611   if (old_space_total_live > old_capacity) {
1612     // XXX - should also try to expand
1613     maximum_compaction = true;
1614   }
1615 
1616   // Old generations.
1617   summarize_space(old_space_id, maximum_compaction);
1618 
1619   // Summarize the remaining spaces in the young gen.  The initial target space
1620   // is the old gen.  If a space does not fit entirely into the target, then the
1621   // remainder is compacted into the space itself and that space becomes the new
1622   // target.
1623   SpaceId dst_space_id = old_space_id;
1624   HeapWord* dst_space_end = old_space->end();
1625   HeapWord** new_top_addr = _space_info[dst_space_id].new_top_addr();
1626   for (unsigned int id = eden_space_id; id < last_space_id; ++id) {
1627     const MutableSpace* space = _space_info[id].space();
1628     const size_t live = pointer_delta(_space_info[id].new_top(),
1629                                       space->bottom());
1630     const size_t available = pointer_delta(dst_space_end, *new_top_addr);
1631 
1632     NOT_PRODUCT(summary_phase_msg(dst_space_id, *new_top_addr, dst_space_end,
1633                                   SpaceId(id), space->bottom(), space->top());)
1634     if (live > 0 && live <= available) {
1635       // All the live data will fit.
1636       bool done = _summary_data.summarize(_space_info[id].split_info(),
1637                                           space->bottom(), space->top(),
1638                                           NULL,
1639                                           *new_top_addr, dst_space_end,
1640                                           new_top_addr);
1641       assert(done, "space must fit into old gen");
1642 
1643       // Reset the new_top value for the space.
1644       _space_info[id].set_new_top(space->bottom());
1645     } else if (live > 0) {
1646       // Attempt to fit part of the source space into the target space.
1647       HeapWord* next_src_addr = NULL;
1648       bool done = _summary_data.summarize(_space_info[id].split_info(),
1649                                           space->bottom(), space->top(),
1650                                           &next_src_addr,
1651                                           *new_top_addr, dst_space_end,
1652                                           new_top_addr);
1653       assert(!done, "space should not fit into old gen");
1654       assert(next_src_addr != NULL, "sanity");
1655 
1656       // The source space becomes the new target, so the remainder is compacted
1657       // within the space itself.
1658       dst_space_id = SpaceId(id);
1659       dst_space_end = space->end();
1660       new_top_addr = _space_info[id].new_top_addr();
1661       NOT_PRODUCT(summary_phase_msg(dst_space_id,
1662                                     space->bottom(), dst_space_end,
1663                                     SpaceId(id), next_src_addr, space->top());)
1664       done = _summary_data.summarize(_space_info[id].split_info(),
1665                                      next_src_addr, space->top(),
1666                                      NULL,
1667                                      space->bottom(), dst_space_end,
1668                                      new_top_addr);
1669       assert(done, "space must fit when compacted into itself");
1670       assert(*new_top_addr <= space->top(), "usage should not grow");
1671     }
1672   }
1673 
1674   log_develop_trace(gc, compaction, phases)("summary phase:  after final summarization");
1675   NOT_PRODUCT(print_region_ranges());
1676   NOT_PRODUCT(print_initial_summary_data(_summary_data, _space_info));
1677 }
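
// Worked example with illustrative numbers: if the old gen has room for
// 10,000 more words after being summarized into itself and eden holds
// 15,000 live words, the first summarize() call for eden cannot fit all of
// it; next_src_addr comes back pointing near the 10,000-word mark (subject
// to region and split boundaries) and the remaining ~5,000 words are then
// summarized to eden's own bottom.  Eden becomes the new target space, so
// the survivor spaces are subsequently summarized after those words.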
1678 
1679 // This method should contain all heap-specific policy for invoking a full
1680 // collection.  invoke_no_policy() will only attempt to compact the heap; it
1681 // will do nothing further.  Bailing out for policy reasons, scavenging before
1682 // the full gc, or any other specialized behavior must be added here.
1683 //
1684 // Note that this method should only be called from the vm_thread while at a
1685 // safepoint.
1686 //
1687 // Note that the all_soft_refs_clear flag in the collector policy
1688 // may be true because this method can be called without intervening
1689 // activity.  For example, when the heap space is tight and full measures
1690 // are being taken to free space.
1691 void PSParallelCompact::invoke(bool maximum_heap_compaction) {
1692   assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
1693   assert(Thread::current() == (Thread*)VMThread::vm_thread(),
1694          "should be in vm thread");
1695 
1696   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
1697   GCCause::Cause gc_cause = heap->gc_cause();
1698   assert(!heap->is_gc_active(), "not reentrant");
1699 
1700   PSAdaptiveSizePolicy* policy = heap->size_policy();
1701   IsGCActiveMark mark;
1702 
1703   if (ScavengeBeforeFullGC) {
1704     PSScavenge::invoke_no_policy();
1705   }
1706 
1707   const bool clear_all_soft_refs =
1708     heap->collector_policy()->should_clear_all_soft_refs();
1709 
1710   PSParallelCompact::invoke_no_policy(clear_all_soft_refs ||
1711                                       maximum_heap_compaction);
1712 }
1713 
1714 // This method contains no policy. You should probably
1715 // be calling invoke() instead.
1716 bool PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) {
1717   assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
1718   assert(ref_processor() != NULL, "Sanity");
1719 
1720   if (GC_locker::check_active_before_gc()) {
1721     return false;
1722   }
1723 
1724   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
1725 
1726   GCIdMark gc_id_mark;
1727   _gc_timer.register_gc_start();
1728   _gc_tracer.report_gc_start(heap->gc_cause(), _gc_timer.gc_start());
1729 
1730   TimeStamp marking_start;
1731   TimeStamp compaction_start;
1732   TimeStamp collection_exit;
1733 
1734   GCCause::Cause gc_cause = heap->gc_cause();
1735   PSYoungGen* young_gen = heap->young_gen();
1736   PSOldGen* old_gen = heap->old_gen();
1737   PSAdaptiveSizePolicy* size_policy = heap->size_policy();
1738 
1739   // The scope of casr should end after code that can change
1740   // CollectorPolicy::_should_clear_all_soft_refs.
1741   ClearedAllSoftRefs casr(maximum_heap_compaction,
1742                           heap->collector_policy());
1743 
1744   if (ZapUnusedHeapArea) {
1745     // Save information needed to minimize mangling
1746     heap->record_gen_tops_before_GC();
1747   }
1748 
1749   heap->pre_full_gc_dump(&_gc_timer);
1750 
1751   // Make sure data structures are sane, make the heap parsable, and do other
1752   // miscellaneous bookkeeping.
1753   pre_compact();
1754 
1755   PreGCValues pre_gc_values(heap);
1756 
1757   // Get the compaction manager reserved for the VM thread.
1758   ParCompactionManager* const vmthread_cm =
1759     ParCompactionManager::manager_array(gc_task_manager()->workers());
1760 
1761   {
1762     ResourceMark rm;
1763     HandleMark hm;
1764 
1765     // Set the number of GC threads to be used in this collection
1766     gc_task_manager()->set_active_gang();
1767     gc_task_manager()->task_idle_workers();
1768 
1769     GCTraceCPUTime tcpu;
1770     GCTraceTime(Info, gc) tm("Pause Full", NULL, gc_cause, true);
1771     TraceCollectorStats tcs(counters());
1772     TraceMemoryManagerStats tms(true /* Full GC */, gc_cause);
1773 
1774     if (TraceOldGenTime) accumulated_time()->start();
1775 
1776     // Let the size policy know we're starting
1777     size_policy->major_collection_begin();
1778 
1779     CodeCache::gc_prologue();
1780 
1781 #if defined(COMPILER2) || INCLUDE_JVMCI
1782     DerivedPointerTable::clear();
1783 #endif
1784 
1785     ref_processor()->enable_discovery();
1786     ref_processor()->setup_policy(maximum_heap_compaction);
1787 
1788     bool marked_for_unloading = false;
1789 
1790     marking_start.update();
1791     marking_phase(vmthread_cm, maximum_heap_compaction, &_gc_tracer);
1792 
1793     bool max_on_system_gc = UseMaximumCompactionOnSystemGC
1794       && GCCause::is_user_requested_gc(gc_cause);
1795     summary_phase(vmthread_cm, maximum_heap_compaction || max_on_system_gc);
1796 
1797 #if defined(COMPILER2) || INCLUDE_JVMCI
1798     assert(DerivedPointerTable::is_active(), "Sanity");
1799     DerivedPointerTable::set_active(false);
1800 #endif
1801 
1802     // adjust_roots() updates Universe::_intArrayKlassObj which is
1803     // needed by the compaction for filling holes in the dense prefix.
1804     adjust_roots();
1805 
1806     compaction_start.update();
1807     compact();
1808 
1809     // Reset the mark bitmap, summary data, and do other bookkeeping.  Must be
1810     // done before resizing.
1811     post_compact();
1812 
1813     // Let the size policy know we're done
1814     size_policy->major_collection_end(old_gen->used_in_bytes(), gc_cause);
1815 
1816     if (UseAdaptiveSizePolicy) {
1817       log_debug(gc, ergo)("AdaptiveSizeStart: collection: %d ", heap->total_collections());
1818       log_trace(gc, ergo)("old_gen_capacity: " SIZE_FORMAT " young_gen_capacity: " SIZE_FORMAT,
1819                           old_gen->capacity_in_bytes(), young_gen->capacity_in_bytes());
1820 
1821       // Don't check if the size_policy is ready here.  Let
1822       // the size_policy check that internally.
1823       if (UseAdaptiveGenerationSizePolicyAtMajorCollection &&
1824           AdaptiveSizePolicy::should_update_promo_stats(gc_cause)) {
1825         // Swap the survivor spaces if from_space is empty. The
1826         // resize_young_gen() called below is normally used after
1827         // a successful young GC and swapping of survivor spaces;
1828         // otherwise, it will fail to resize the young gen with
1829         // the current implementation.
1830         if (young_gen->from_space()->is_empty()) {
1831           young_gen->from_space()->clear(SpaceDecorator::Mangle);
1832           young_gen->swap_spaces();
1833         }
1834 
1835         // Calculate optimal free space amounts
1836         assert(young_gen->max_size() >
1837           young_gen->from_space()->capacity_in_bytes() +
1838           young_gen->to_space()->capacity_in_bytes(),
1839           "Sizes of space in young gen are out-of-bounds");
1840 
1841         size_t young_live = young_gen->used_in_bytes();
1842         size_t eden_live = young_gen->eden_space()->used_in_bytes();
1843         size_t old_live = old_gen->used_in_bytes();
1844         size_t cur_eden = young_gen->eden_space()->capacity_in_bytes();
1845         size_t max_old_gen_size = old_gen->max_gen_size();
1846         size_t max_eden_size = young_gen->max_size() -
1847           young_gen->from_space()->capacity_in_bytes() -
1848           young_gen->to_space()->capacity_in_bytes();
1849 
1850         // Used for diagnostics
1851         size_policy->clear_generation_free_space_flags();
1852 
1853         size_policy->compute_generations_free_space(young_live,
1854                                                     eden_live,
1855                                                     old_live,
1856                                                     cur_eden,
1857                                                     max_old_gen_size,
1858                                                     max_eden_size,
1859                                                     true /* full gc*/);
1860 
1861         size_policy->check_gc_overhead_limit(young_live,
1862                                              eden_live,
1863                                              max_old_gen_size,
1864                                              max_eden_size,
1865                                              true /* full gc*/,
1866                                              gc_cause,
1867                                              heap->collector_policy());
1868 
1869         size_policy->decay_supplemental_growth(true /* full gc*/);
1870 
1871         heap->resize_old_gen(
1872           size_policy->calculated_old_free_size_in_bytes());
1873 
1874         heap->resize_young_gen(size_policy->calculated_eden_size_in_bytes(),
1875                                size_policy->calculated_survivor_size_in_bytes());
1876       }
1877 
1878       log_debug(gc, ergo)("AdaptiveSizeStop: collection: %d ", heap->total_collections());
1879     }
1880 
1881     if (UsePerfData) {
1882       PSGCAdaptivePolicyCounters* const counters = heap->gc_policy_counters();
1883       counters->update_counters();
1884       counters->update_old_capacity(old_gen->capacity_in_bytes());
1885       counters->update_young_capacity(young_gen->capacity_in_bytes());
1886     }
1887 
1888     heap->resize_all_tlabs();
1889 
1890     // Resize the metaspace capacity after a collection
1891     MetaspaceGC::compute_new_size();
1892 
1893     if (TraceOldGenTime) {
1894       accumulated_time()->stop();
1895     }
1896 
1897     young_gen->print_used_change(pre_gc_values.young_gen_used());
1898     old_gen->print_used_change(pre_gc_values.old_gen_used());
1899     MetaspaceAux::print_metaspace_change(pre_gc_values.metadata_used());
1900 
1901     // Track memory usage and detect low memory
1902     MemoryService::track_memory_usage();
1903     heap->update_counters();
1904     gc_task_manager()->release_idle_workers();
1905   }
1906 
1907 #ifdef ASSERT
1908   for (size_t i = 0; i < ParallelGCThreads + 1; ++i) {
1909     ParCompactionManager* const cm =
1910       ParCompactionManager::manager_array(int(i));
1911     assert(cm->marking_stack()->is_empty(),       "should be empty");
1912     assert(ParCompactionManager::region_list(int(i))->is_empty(), "should be empty");
1913   }
1914 #endif // ASSERT
1915 
1916   if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
1917     HandleMark hm;  // Discard invalid handles created during verification
1918     Universe::verify("After GC");
1919   }
1920 
1921   // Re-verify object start arrays
1922   if (VerifyObjectStartArray &&
1923       VerifyAfterGC) {
1924     old_gen->verify_object_start_array();
1925   }
1926 
1927   if (ZapUnusedHeapArea) {
1928     old_gen->object_space()->check_mangled_unused_area_complete();
1929   }
1930 
1931   NOT_PRODUCT(ref_processor()->verify_no_references_recorded());
1932 
1933   collection_exit.update();
1934 
1935   heap->print_heap_after_gc();
1936   heap->trace_heap_after_gc(&_gc_tracer);
1937 
1938   log_debug(gc, task, time)("VM-Thread " JLONG_FORMAT " " JLONG_FORMAT " " JLONG_FORMAT,
1939                          marking_start.ticks(), compaction_start.ticks(),
1940                          collection_exit.ticks());
1941   gc_task_manager()->print_task_time_stamps();
1942 
1943   heap->post_full_gc_dump(&_gc_timer);
1944 
1945 #ifdef TRACESPINNING
1946   ParallelTaskTerminator::print_termination_counts();
1947 #endif
1948 
1949   AdaptiveSizePolicyOutput::print(size_policy, heap->total_collections());
1950 
1951   _gc_timer.register_gc_end();
1952 
1953   _gc_tracer.report_dense_prefix(dense_prefix(old_space_id));
1954   _gc_tracer.report_gc_end(_gc_timer.gc_end(), _gc_timer.time_partitions());
1955 
1956   return true;
1957 }
1958 
1959 bool PSParallelCompact::absorb_live_data_from_eden(PSAdaptiveSizePolicy* size_policy,
1960                                              PSYoungGen* young_gen,
1961                                              PSOldGen* old_gen) {
1962   MutableSpace* const eden_space = young_gen->eden_space();
1963   assert(!eden_space->is_empty(), "eden must be non-empty");
1964   assert(young_gen->virtual_space()->alignment() ==
1965          old_gen->virtual_space()->alignment(), "alignments do not match");
1966 
1967   if (!(UseAdaptiveSizePolicy && UseAdaptiveGCBoundary)) {
1968     return false;
1969   }
1970 
1971   // Both generations must be completely committed.
1972   if (young_gen->virtual_space()->uncommitted_size() != 0) {
1973     return false;
1974   }
1975   if (old_gen->virtual_space()->uncommitted_size() != 0) {
1976     return false;
1977   }
1978 
1979   // Figure out how much to take from eden.  Include the average amount promoted
1980   // in the total; otherwise the next young gen GC will simply bail out to a
1981   // full GC.
1982   const size_t alignment = old_gen->virtual_space()->alignment();
1983   const size_t eden_used = eden_space->used_in_bytes();
1984   const size_t promoted = (size_t)size_policy->avg_promoted()->padded_average();
1985   const size_t absorb_size = align_size_up(eden_used + promoted, alignment);
1986   const size_t eden_capacity = eden_space->capacity_in_bytes();
1987 
1988   if (absorb_size >= eden_capacity) {
1989     return false; // Must leave some space in eden.
1990   }
1991 
1992   const size_t new_young_size = young_gen->capacity_in_bytes() - absorb_size;
1993   if (new_young_size < young_gen->min_gen_size()) {
1994     return false; // Respect young gen minimum size.
1995   }
1996 
1997   log_trace(heap, ergo)(" absorbing " SIZE_FORMAT "K:  "
1998                         "eden " SIZE_FORMAT "K->" SIZE_FORMAT "K "
1999                         "from " SIZE_FORMAT "K, to " SIZE_FORMAT "K "
2000                         "young_gen " SIZE_FORMAT "K->" SIZE_FORMAT "K ",
2001                         absorb_size / K,
2002                         eden_capacity / K, (eden_capacity - absorb_size) / K,
2003                         young_gen->from_space()->used_in_bytes() / K,
2004                         young_gen->to_space()->used_in_bytes() / K,
2005                         young_gen->capacity_in_bytes() / K, new_young_size / K);
2006 
2007   // Fill the unused part of the old gen.
2008   MutableSpace* const old_space = old_gen->object_space();
2009   HeapWord* const unused_start = old_space->top();
2010   size_t const unused_words = pointer_delta(old_space->end(), unused_start);
2011 
2012   if (unused_words > 0) {
2013     if (unused_words < CollectedHeap::min_fill_size()) {
2014       return false;  // If the old gen cannot be filled, must give up.
2015     }
2016     CollectedHeap::fill_with_objects(unused_start, unused_words);
2017   }
2018 
2019   // Take the live data from eden and set both top and end in the old gen to
2020   // eden top.  (Need to set end because reset_after_change() mangles the region
2021   // from end to virtual_space->high() in debug builds).
2022   HeapWord* const new_top = eden_space->top();
2023   old_gen->virtual_space()->expand_into(young_gen->virtual_space(),
2024                                         absorb_size);
2025   young_gen->reset_after_change();
2026   old_space->set_top(new_top);
2027   old_space->set_end(new_top);
2028   old_gen->reset_after_change();
2029 
2030   // Update the object start array for the filler object and the data from eden.
2031   ObjectStartArray* const start_array = old_gen->start_array();
2032   for (HeapWord* p = unused_start; p < new_top; p += oop(p)->size()) {
2033     start_array->allocate_block(p);
2034   }
2035 
2036   // Could update the promoted average here, but it is not typically updated at
2037   // full GCs and the value to use is unclear.  Something like
2038   //
2039   // cur_promoted_avg + absorb_size / number_of_scavenges_since_last_full_gc.
2040 
2041   size_policy->set_bytes_absorbed_from_eden(absorb_size);
2042   return true;
2043 }
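
// Worked example with illustrative numbers: with 20 MB used in eden, an
// average padded promotion of 4 MB and a 1 MB generation alignment,
// absorb_size is align_size_up(24 MB, 1 MB) = 24 MB.  Provided eden's
// capacity exceeds 24 MB and the shrunken young gen stays at or above its
// minimum size, the old gen boundary is moved up by 24 MB and eden's live
// data becomes part of the old gen without being copied.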
2044 
2045 GCTaskManager* const PSParallelCompact::gc_task_manager() {
2046   assert(ParallelScavengeHeap::gc_task_manager() != NULL,
2047     "shouldn't return NULL");
2048   return ParallelScavengeHeap::gc_task_manager();
2049 }
2050 
2051 void PSParallelCompact::marking_phase(ParCompactionManager* cm,
2052                                       bool maximum_heap_compaction,
2053                                       ParallelOldTracer *gc_tracer) {
2054   // Recursively traverse all live objects and mark them
2055   GCTraceTime(Trace, gc, phases) tm("Marking Phase", &_gc_timer);
2056 
2057   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
2058   uint parallel_gc_threads = heap->gc_task_manager()->workers();
2059   uint active_gc_threads = heap->gc_task_manager()->active_workers();
2060   TaskQueueSetSuper* qset = ParCompactionManager::region_array();
2061   ParallelTaskTerminator terminator(active_gc_threads, qset);
2062 
2063   ParCompactionManager::MarkAndPushClosure mark_and_push_closure(cm);
2064   ParCompactionManager::FollowStackClosure follow_stack_closure(cm);
2065 
2066   // Need new claim bits before marking starts.
2067   ClassLoaderDataGraph::clear_claimed_marks();
2068 
2069   {
2070     GCTraceTime(Trace, gc, phases) tm("Par Mark", &_gc_timer);
2071 
2072     ParallelScavengeHeap::ParStrongRootsScope psrs;
2073 
2074     GCTaskQueue* q = GCTaskQueue::create();
2075 
2076     q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::universe));
2077     q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::jni_handles));
2078     // We scan the thread roots in parallel
2079     Threads::create_thread_roots_marking_tasks(q);
2080     q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::object_synchronizer));
2081     q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::flat_profiler));
2082     q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::management));
2083     q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::system_dictionary));
2084     q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::class_loader_data));
2085     q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::jvmti));
2086     q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::code_cache));
2087 
2088     if (active_gc_threads > 1) {
2089       for (uint j = 0; j < active_gc_threads; j++) {
2090         q->enqueue(new StealMarkingTask(&terminator));
2091       }
2092     }
2093 
2094     gc_task_manager()->execute_and_wait(q);
2095   }
2096 
2097   // Process reference objects found during marking
2098   {
2099     GCTraceTime(Trace, gc, phases) tm("Reference Processing", &_gc_timer);
2100 
2101     ReferenceProcessorStats stats;
2102     if (ref_processor()->processing_is_mt()) {
2103       RefProcTaskExecutor task_executor;
2104       stats = ref_processor()->process_discovered_references(
2105         is_alive_closure(), &mark_and_push_closure, &follow_stack_closure,
2106         &task_executor, &_gc_timer);
2107     } else {
2108       stats = ref_processor()->process_discovered_references(
2109         is_alive_closure(), &mark_and_push_closure, &follow_stack_closure, NULL,
2110         &_gc_timer);
2111     }
2112 
2113     gc_tracer->report_gc_reference_stats(stats);
2114   }
2115 
2116   GCTraceTime(Trace, gc) tm_m("Class Unloading", &_gc_timer);
2117 
2118   // This is the point where the entire marking should have completed.
2119   assert(cm->marking_stacks_empty(), "Marking should have completed");
2120 
2121   // Follow system dictionary roots and unload classes.
2122   bool purged_class = SystemDictionary::do_unloading(is_alive_closure());
2123 
2124   // Unload nmethods.
2125   CodeCache::do_unloading(is_alive_closure(), purged_class);
2126 
2127   // Prune dead klasses from subklass/sibling/implementor lists.
2128   Klass::clean_weak_klass_links(is_alive_closure());
2129 
2130   // Delete entries for dead interned strings.
2131   StringTable::unlink(is_alive_closure());
2132 
2133   // Clean up unreferenced symbols in symbol table.
2134   SymbolTable::unlink();
2135   _gc_tracer.report_object_count_after_gc(is_alive_closure());
2136 }
2137 
2138 // This should be moved to the shared markSweep code!
2139 class PSAlwaysTrueClosure: public BoolObjectClosure {
2140 public:
2141   bool do_object_b(oop p) { return true; }
2142 };
2143 static PSAlwaysTrueClosure always_true;
2144 
2145 void PSParallelCompact::adjust_roots() {
2146   // Adjust the pointers to reflect the new locations
2147   GCTraceTime(Trace, gc, phases) tm("Adjust Roots", &_gc_timer);
2148 
2149   // Need new claim bits when tracing through and adjusting pointers.
2150   ClassLoaderDataGraph::clear_claimed_marks();
2151 
2152   // General strong roots.
2153   Universe::oops_do(adjust_pointer_closure());
2154   JNIHandles::oops_do(adjust_pointer_closure());   // Global (strong) JNI handles
2155   CLDToOopClosure adjust_from_cld(adjust_pointer_closure());
2156   Threads::oops_do(adjust_pointer_closure(), &adjust_from_cld, NULL);
2157   ObjectSynchronizer::oops_do(adjust_pointer_closure());
2158   FlatProfiler::oops_do(adjust_pointer_closure());
2159   Management::oops_do(adjust_pointer_closure());
2160   JvmtiExport::oops_do(adjust_pointer_closure());
2161   SystemDictionary::oops_do(adjust_pointer_closure());
2162   ClassLoaderDataGraph::oops_do(adjust_pointer_closure(), adjust_klass_closure(), true);
2163 
2164   // Now adjust pointers in remaining weak roots.  (All of which should
2165   // have been cleared if they pointed to non-surviving objects.)
2166   // Global (weak) JNI handles
2167   JNIHandles::weak_oops_do(&always_true, adjust_pointer_closure());
2168 
2169   CodeBlobToOopClosure adjust_from_blobs(adjust_pointer_closure(), CodeBlobToOopClosure::FixRelocations);
2170   CodeCache::blobs_do(&adjust_from_blobs);
2171   StringTable::oops_do(adjust_pointer_closure());
2172   ref_processor()->weak_oops_do(adjust_pointer_closure());
2173   // Roots were visited so references into the young gen in roots
2174   // may have been scanned.  Process them also.
2175   // Should the reference processor have a span that excludes
2176   // young gen objects?
2177   PSScavenge::reference_processor()->weak_oops_do(adjust_pointer_closure());
2178 }
2179 
2180 // Helper class to print 8 region numbers per line and then print the total at the end.
2181 class FillableRegionLogger : public StackObj {
2182 private:
2183   LogHandle(gc, compaction) log;
2184   static const int LineLength = 8;
2185   size_t _regions[LineLength];
2186   int _next_index;
2187   bool _enabled;
2188   size_t _total_regions;
2189 public:
2190   FillableRegionLogger() : _next_index(0), _total_regions(0), _enabled(develop_log_is_enabled(Trace, gc, compaction)) { }
2191   ~FillableRegionLogger() {
2192     log.trace(SIZE_FORMAT " initially fillable regions", _total_regions);
2193   }
2194 
2195   void print_line() {
2196     if (!_enabled || _next_index == 0) {
2197       return;
2198     }
2199     FormatBuffer<> line("Fillable: ");
2200     for (int i = 0; i < _next_index; i++) {
2201       line.append(" " SIZE_FORMAT_W(7), _regions[i]);
2202     }
2203     log.trace("%s", line.buffer());
2204     _next_index = 0;
2205   }
2206 
2207   void handle(size_t region) {
2208     if (!_enabled) {
2209       return;
2210     }
2211     _regions[_next_index++] = region;
2212     if (_next_index == LineLength) {
2213       print_line();
2214     }
2215     _total_regions++;
2216   }
2217 };
2218 
2219 void PSParallelCompact::enqueue_region_draining_tasks(GCTaskQueue* q,
2220                                                       uint parallel_gc_threads)
2221 {
2222   GCTraceTime(Trace, gc, phases) tm("Drain Task Setup", &_gc_timer);
2223 
2224   // Find the threads that are active
2225   unsigned int which = 0;
2226 
2227   const uint task_count = MAX2(parallel_gc_threads, 1U);
2228   for (uint j = 0; j < task_count; j++) {
2229     q->enqueue(new DrainStacksCompactionTask(j));
2230     ParCompactionManager::verify_region_list_empty(j);
2231     // Set the region stack variables to "no region stack" values so
2232     // that the stealing tasks will recognize a compaction manager as
2233     // still needing a region stack if it does not get one by executing
2234     // a draining task.
2235     ParCompactionManager* cm = ParCompactionManager::manager_array(j);
2236     cm->set_region_stack(NULL);
2237     cm->set_region_stack_index((uint)max_uintx);
2238   }
2239   ParCompactionManager::reset_recycled_stack_index();
2240 
2241   // Find all regions that are available (can be filled immediately) and
2242   // distribute them to the thread stacks.  The iteration is done in reverse
2243   // order (high to low) so the regions will be removed in ascending order.
2244 
2245   const ParallelCompactData& sd = PSParallelCompact::summary_data();
2246 
2247   // "which" indexes the per-task region lists created above and must
2248   // satisfy 0 <= which < task_count.
2249 
2250   which = 0;
2251   // id + 1 is used to test termination so that an unsigned type can
2252   // be used even though old_space_id == 0.
2253   FillableRegionLogger region_logger;
2254   for (unsigned int id = to_space_id; id + 1 > old_space_id; --id) {
2255     SpaceInfo* const space_info = _space_info + id;
2256     MutableSpace* const space = space_info->space();
2257     HeapWord* const new_top = space_info->new_top();
2258 
2259     const size_t beg_region = sd.addr_to_region_idx(space_info->dense_prefix());
2260     const size_t end_region =
2261       sd.addr_to_region_idx(sd.region_align_up(new_top));
2262 
2263     for (size_t cur = end_region - 1; cur + 1 > beg_region; --cur) {
2264       if (sd.region(cur)->claim_unsafe()) {
2265         ParCompactionManager::region_list_push(which, cur);
2266         region_logger.handle(cur);
2267         // Assign regions to tasks in round-robin fashion.
2268         if (++which == task_count) {
2269           assert(which <= parallel_gc_threads,
2270             "Inconsistent number of workers");
2271           which = 0;
2272         }
2273       }
2274     }
2275     region_logger.print_line();
2276   }
2277 }
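
// For example, with task_count == 4 the fillable regions found above are
// pushed onto the per-task region lists 0, 1, 2, 3, 0, 1, ... in the order
// they are discovered; because each space is walked from its highest region
// down to its dense prefix, every list is later drained in ascending region
// order.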
2278 
2279 #define PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING 4
2280 
2281 void PSParallelCompact::enqueue_dense_prefix_tasks(GCTaskQueue* q,
2282                                                     uint parallel_gc_threads) {
2283   GCTraceTime(Trace, gc, phases) tm("Dense Prefix Task Setup", &_gc_timer);
2284 
2285   ParallelCompactData& sd = PSParallelCompact::summary_data();
2286 
2287   // Iterate over all the spaces adding tasks for updating
2288   // regions in the dense prefix.  Assume that 1 gc thread
2289   // will work on opening the gaps and the remaining gc threads
2290   // will work on the dense prefix.
2291   unsigned int space_id;
2292   for (space_id = old_space_id; space_id < last_space_id; ++space_id) {
2293     HeapWord* const dense_prefix_end = _space_info[space_id].dense_prefix();
2294     const MutableSpace* const space = _space_info[space_id].space();
2295 
2296     if (dense_prefix_end == space->bottom()) {
2297       // There is no dense prefix for this space.
2298       continue;
2299     }
2300 
2301     // The dense prefix is before this region.
2302     size_t region_index_end_dense_prefix =
2303         sd.addr_to_region_idx(dense_prefix_end);
2304     RegionData* const dense_prefix_cp =
2305       sd.region(region_index_end_dense_prefix);
2306     assert(dense_prefix_end == space->end() ||
2307            dense_prefix_cp->available() ||
2308            dense_prefix_cp->claimed(),
2309            "The region after the dense prefix should always be ready to fill");
2310 
2311     size_t region_index_start = sd.addr_to_region_idx(space->bottom());
2312 
2313     // Is there dense prefix work?
2314     size_t total_dense_prefix_regions =
2315       region_index_end_dense_prefix - region_index_start;
2316     // How many regions of the dense prefix should be given to
2317     // each thread?
2318     if (total_dense_prefix_regions > 0) {
2319       uint tasks_for_dense_prefix = 1;
2320       if (total_dense_prefix_regions <=
2321           (parallel_gc_threads * PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING)) {
2322         // Don't over partition.  This assumes that
2323         // PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING is a small integer value
2324         // so there are not many regions to process.
2325         tasks_for_dense_prefix = parallel_gc_threads;
2326       } else {
2327         // Over partition
2328         tasks_for_dense_prefix = parallel_gc_threads *
2329           PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING;
2330       }
2331       size_t regions_per_thread = total_dense_prefix_regions /
2332         tasks_for_dense_prefix;
2333       // Give each thread at least 1 region.
2334       if (regions_per_thread == 0) {
2335         regions_per_thread = 1;
2336       }
2337 
2338       for (uint k = 0; k < tasks_for_dense_prefix; k++) {
2339         if (region_index_start >= region_index_end_dense_prefix) {
2340           break;
2341         }
2342         // region_index_end is not processed
2343         size_t region_index_end = MIN2(region_index_start + regions_per_thread,
2344                                        region_index_end_dense_prefix);
2345         q->enqueue(new UpdateDensePrefixTask(SpaceId(space_id),
2346                                              region_index_start,
2347                                              region_index_end));
2348         region_index_start = region_index_end;
2349       }
2350     }
2351     // This gets any part of the dense prefix that did not
2352     // fit evenly.
2353     if (region_index_start < region_index_end_dense_prefix) {
2354       q->enqueue(new UpdateDensePrefixTask(SpaceId(space_id),
2355                                            region_index_start,
2356                                            region_index_end_dense_prefix));
2357     }
2358   }
2359 }
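
// Worked example with illustrative numbers: with 8 GC threads and a dense
// prefix of 1,000 regions, the threshold is 8 * 4 = 32 regions, so the work
// is over-partitioned into 32 tasks of 1,000 / 32 = 31 regions each (992
// regions), and the trailing check above enqueues one final task for the
// remaining 8 regions.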
2360 
2361 void PSParallelCompact::enqueue_region_stealing_tasks(
2362                                      GCTaskQueue* q,
2363                                      ParallelTaskTerminator* terminator_ptr,
2364                                      uint parallel_gc_threads) {
2365   GCTraceTime(Trace, gc, phases) tm("Steal Task Setup", &_gc_timer);
2366 
2367   // Once a thread has drained its stack, it should try to steal regions from
2368   // other threads.
2369   if (parallel_gc_threads > 1) {
2370     for (uint j = 0; j < parallel_gc_threads; j++) {
2371       q->enqueue(new StealRegionCompactionTask(terminator_ptr));
2372     }
2373   }
2374 }
2375 
2376 #ifdef ASSERT
2377 // Write a histogram of the number of times the block table was filled for a
2378 // region.
2379 void PSParallelCompact::write_block_fill_histogram()
2380 {
2381   if (!develop_log_is_enabled(Trace, gc, compaction)) {
2382     return;
2383   }
2384 
2385   LogHandle(gc, compaction) log;
2386   ResourceMark rm;
2387   outputStream* out = log.trace_stream();
2388 
2389   typedef ParallelCompactData::RegionData rd_t;
2390   ParallelCompactData& sd = summary_data();
2391 
2392   for (unsigned int id = old_space_id; id < last_space_id; ++id) {
2393     MutableSpace* const spc = _space_info[id].space();
2394     if (spc->bottom() != spc->top()) {
2395       const rd_t* const beg = sd.addr_to_region_ptr(spc->bottom());
2396       HeapWord* const top_aligned_up = sd.region_align_up(spc->top());
2397       const rd_t* const end = sd.addr_to_region_ptr(top_aligned_up);
2398 
2399       size_t histo[5] = { 0, 0, 0, 0, 0 };
2400       const size_t histo_len = sizeof(histo) / sizeof(size_t);
2401       const size_t region_cnt = pointer_delta(end, beg, sizeof(rd_t));
2402 
2403       for (const rd_t* cur = beg; cur < end; ++cur) {
2404         ++histo[MIN2(cur->blocks_filled_count(), histo_len - 1)];
2405       }
2406       out->print("Block fill histogram: %u %-4s" SIZE_FORMAT_W(5), id, space_names[id], region_cnt);
2407       for (size_t i = 0; i < histo_len; ++i) {
2408         out->print(" " SIZE_FORMAT_W(5) " %5.1f%%",
2409                    histo[i], 100.0 * histo[i] / region_cnt);
2410       }
2411       out->cr();
2412     }
2413   }
2414 }
2415 #endif // #ifdef ASSERT
2416 
2417 void PSParallelCompact::compact() {
2418   GCTraceTime(Trace, gc, phases) tm("Compaction Phase", &_gc_timer);
2419 
2420   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
2421   PSOldGen* old_gen = heap->old_gen();
2422   old_gen->start_array()->reset();
2423   uint parallel_gc_threads = heap->gc_task_manager()->workers();
2424   uint active_gc_threads = heap->gc_task_manager()->active_workers();
2425   TaskQueueSetSuper* qset = ParCompactionManager::region_array();
2426   ParallelTaskTerminator terminator(active_gc_threads, qset);
2427 
2428   GCTaskQueue* q = GCTaskQueue::create();
2429   enqueue_region_draining_tasks(q, active_gc_threads);
2430   enqueue_dense_prefix_tasks(q, active_gc_threads);
2431   enqueue_region_stealing_tasks(q, &terminator, active_gc_threads);
2432 
2433   {
2434     GCTraceTime(Trace, gc, phases) tm("Par Compact", &_gc_timer);
2435 
2436     gc_task_manager()->execute_and_wait(q);
2437 
2438 #ifdef  ASSERT
2439     // Verify that all regions have been processed before the deferred updates.
2440     for (unsigned int id = old_space_id; id < last_space_id; ++id) {
2441       verify_complete(SpaceId(id));
2442     }
2443 #endif
2444   }
2445 
2446   {
2447     // Update the deferred objects, if any.  Any compaction manager can be used.
2448     GCTraceTime(Trace, gc, phases) tm("Deferred Updates", &_gc_timer);
2449     ParCompactionManager* cm = ParCompactionManager::manager_array(0);
2450     for (unsigned int id = old_space_id; id < last_space_id; ++id) {
2451       update_deferred_objects(cm, SpaceId(id));
2452     }
2453   }
2454 
2455   DEBUG_ONLY(write_block_fill_histogram());
2456 }
2457 
2458 #ifdef  ASSERT
2459 void PSParallelCompact::verify_complete(SpaceId space_id) {
2460   // All Regions between space bottom() and new_top() should be marked as filled
2461   // and all Regions between new_top() and top() should be available (i.e.,
2462   // should have been emptied).
2463   ParallelCompactData& sd = summary_data();
2464   SpaceInfo si = _space_info[space_id];
2465   HeapWord* new_top_addr = sd.region_align_up(si.new_top());
2466   HeapWord* old_top_addr = sd.region_align_up(si.space()->top());
2467   const size_t beg_region = sd.addr_to_region_idx(si.space()->bottom());
2468   const size_t new_top_region = sd.addr_to_region_idx(new_top_addr);
2469   const size_t old_top_region = sd.addr_to_region_idx(old_top_addr);
2470 
2471   bool issued_a_warning = false;
2472 
2473   size_t cur_region;
2474   for (cur_region = beg_region; cur_region < new_top_region; ++cur_region) {
2475     const RegionData* const c = sd.region(cur_region);
2476     if (!c->completed()) {
2477       warning("region " SIZE_FORMAT " not filled:  "
2478               "destination_count=%u",
2479               cur_region, c->destination_count());
2480       issued_a_warning = true;
2481     }
2482   }
2483 
2484   for (cur_region = new_top_region; cur_region < old_top_region; ++cur_region) {
2485     const RegionData* const c = sd.region(cur_region);
2486     if (!c->available()) {
2487       warning("region " SIZE_FORMAT " not empty:   "
2488               "destination_count=%u",
2489               cur_region, c->destination_count());
2490       issued_a_warning = true;
2491     }
2492   }
2493 
2494   if (issued_a_warning) {
2495     print_region_ranges();
2496   }
2497 }
2498 #endif  // #ifdef ASSERT
2499 
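     // The object at addr does not move; record its start in the start array
     // and update its interior oops in place.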
2500 inline void UpdateOnlyClosure::do_addr(HeapWord* addr) {
2501   _start_array->allocate_block(addr);
2502   compaction_manager()->update_contents(oop(addr));
2503 }
2504 
2505 // Update interior oops in the range of regions [beg_region, end_region).
2506 void
2507 PSParallelCompact::update_and_deadwood_in_dense_prefix(ParCompactionManager* cm,
2508                                                        SpaceId space_id,
2509                                                        size_t beg_region,
2510                                                        size_t end_region) {
2511   ParallelCompactData& sd = summary_data();
2512   ParMarkBitMap* const mbm = mark_bitmap();
2513 
2514   HeapWord* beg_addr = sd.region_to_addr(beg_region);
2515   HeapWord* const end_addr = sd.region_to_addr(end_region);
2516   assert(beg_region <= end_region, "bad region range");
2517   assert(end_addr <= dense_prefix(space_id), "not in the dense prefix");
2518 
2519 #ifdef  ASSERT
2520   // Claim the regions to avoid triggering an assert when they are marked as
2521   // filled.
2522   for (size_t claim_region = beg_region; claim_region < end_region; ++claim_region) {
2523     assert(sd.region(claim_region)->claim_unsafe(), "claim() failed");
2524   }
2525 #endif  // #ifdef ASSERT
2526 
2527   if (beg_addr != space(space_id)->bottom()) {
2528     // Find the first live object or block of dead space that *starts* in this
2529     // range of regions.  If a partial object crosses onto the region, skip it;
2530     // it will be marked for 'deferred update' when the object head is
2531     // processed.  If dead space crosses onto the region, it is also skipped; it
2532     // will be filled when the prior region is processed.  If neither of those
2533     // apply, the first word in the region is the start of a live object or dead
2534     // space.
2535     assert(beg_addr > space(space_id)->bottom(), "sanity");
2536     const RegionData* const cp = sd.region(beg_region);
2537     if (cp->partial_obj_size() != 0) {
2538       beg_addr = sd.partial_obj_end(beg_region);
2539     } else if (dead_space_crosses_boundary(cp, mbm->addr_to_bit(beg_addr))) {
2540       beg_addr = mbm->find_obj_beg(beg_addr, end_addr);
2541     }
2542   }
2543 
2544   if (beg_addr < end_addr) {
2545     // A live object or block of dead space starts in this range of regions.
2546     HeapWord* const dense_prefix_end = dense_prefix(space_id);
2547 
2548     // Create closures and iterate.
2549     UpdateOnlyClosure update_closure(mbm, cm, space_id);
2550     FillClosure fill_closure(cm, space_id);
2551     ParMarkBitMap::IterationStatus status;
2552     status = mbm->iterate(&update_closure, &fill_closure, beg_addr, end_addr,
2553                           dense_prefix_end);
2554     if (status == ParMarkBitMap::incomplete) {
2555       update_closure.do_addr(update_closure.source());
2556     }
2557   }
2558 
2559   // Mark the regions as filled.
2560   RegionData* const beg_cp = sd.region(beg_region);
2561   RegionData* const end_cp = sd.region(end_region);
2562   for (RegionData* cp = beg_cp; cp < end_cp; ++cp) {
2563     cp->set_completed();
2564   }
2565 }
2566 
2567 // Return the SpaceId for the space containing addr.  If addr is not in the
2568 // heap, last_space_id is returned.  In debug mode it expects the address to be
2569 // in the heap and asserts such.
2570 PSParallelCompact::SpaceId PSParallelCompact::space_id(HeapWord* addr) {
2571   assert(ParallelScavengeHeap::heap()->is_in_reserved(addr), "addr not in the heap");
2572 
2573   for (unsigned int id = old_space_id; id < last_space_id; ++id) {
2574     if (_space_info[id].space()->contains(addr)) {
2575       return SpaceId(id);
2576     }
2577   }
2578 
2579   assert(false, "no space contains the addr");
2580   return last_space_id;
2581 }
2582 
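     // Complete the pointer updates for objects whose copies were split across
     // destination regions.  fill_region() records such objects with
     // set_deferred_obj_addr() and defers their interior oop updates until all
     // copying has finished.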
2583 void PSParallelCompact::update_deferred_objects(ParCompactionManager* cm,
2584                                                 SpaceId id) {
2585   assert(id < last_space_id, "bad space id");
2586 
2587   ParallelCompactData& sd = summary_data();
2588   const SpaceInfo* const space_info = _space_info + id;
2589   ObjectStartArray* const start_array = space_info->start_array();
2590 
2591   const MutableSpace* const space = space_info->space();
2592   assert(space_info->dense_prefix() >= space->bottom(), "dense_prefix not set");
2593   HeapWord* const beg_addr = space_info->dense_prefix();
2594   HeapWord* const end_addr = sd.region_align_up(space_info->new_top());
2595 
2596   const RegionData* const beg_region = sd.addr_to_region_ptr(beg_addr);
2597   const RegionData* const end_region = sd.addr_to_region_ptr(end_addr);
2598   const RegionData* cur_region;
2599   for (cur_region = beg_region; cur_region < end_region; ++cur_region) {
2600     HeapWord* const addr = cur_region->deferred_obj_addr();
2601     if (addr != NULL) {
2602       if (start_array != NULL) {
2603         start_array->allocate_block(addr);
2604       }
2605       cm->update_contents(oop(addr));
2606       assert(oop(addr)->is_oop_or_null(), "Expected an oop or NULL at " PTR_FORMAT, p2i(oop(addr)));
2607     }
2608   }
2609 }
2610 
2611 // Skip over count live words starting from beg, and return the address of the
2612 // next live word.  Unless marked, the word corresponding to beg is assumed to
2613 // be dead.  Callers must either ensure beg does not correspond to the middle of
2614 // an object, or account for those live words in some other way.  Callers must
2615 // also ensure that there are enough live words in the range [beg, end) to skip.
2616 HeapWord*
2617 PSParallelCompact::skip_live_words(HeapWord* beg, HeapWord* end, size_t count)
2618 {
2619   assert(count > 0, "sanity");
2620 
2621   ParMarkBitMap* m = mark_bitmap();
2622   idx_t bits_to_skip = m->words_to_bits(count);
2623   idx_t cur_beg = m->addr_to_bit(beg);
2624   const idx_t search_end = BitMap::word_align_up(m->addr_to_bit(end));
2625 
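       // Consume whole objects from the bitmap until fewer bits remain to be
       // skipped than the next object contains; the requested address then
       // falls within that object.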
2626   do {
2627     cur_beg = m->find_obj_beg(cur_beg, search_end);
2628     idx_t cur_end = m->find_obj_end(cur_beg, search_end);
2629     const size_t obj_bits = cur_end - cur_beg + 1;
2630     if (obj_bits > bits_to_skip) {
2631       return m->bit_to_addr(cur_beg + bits_to_skip);
2632     }
2633     bits_to_skip -= obj_bits;
2634     cur_beg = cur_end + 1;
2635   } while (bits_to_skip > 0);
2636 
2637   // Skipping the desired number of words landed just past the end of an object.
2638   // Find the start of the next object.
2639   cur_beg = m->find_obj_beg(cur_beg, search_end);
2640   assert(cur_beg < m->addr_to_bit(end), "not enough live words to skip");
2641   return m->bit_to_addr(cur_beg);
2642 }
2643 
2644 HeapWord* PSParallelCompact::first_src_addr(HeapWord* const dest_addr,
2645                                             SpaceId src_space_id,
2646                                             size_t src_region_idx)
2647 {
2648   assert(summary_data().is_region_aligned(dest_addr), "not aligned");
2649 
2650   const SplitInfo& split_info = _space_info[src_space_id].split_info();
2651   if (split_info.dest_region_addr() == dest_addr) {
2652     // The partial object ending at the split point contains the first word to
2653     // be copied to dest_addr.
2654     return split_info.first_src_addr();
2655   }
2656 
2657   const ParallelCompactData& sd = summary_data();
2658   ParMarkBitMap* const bitmap = mark_bitmap();
2659   const size_t RegionSize = ParallelCompactData::RegionSize;
2660 
2661   assert(sd.is_region_aligned(dest_addr), "not aligned");
2662   const RegionData* const src_region_ptr = sd.region(src_region_idx);
2663   const size_t partial_obj_size = src_region_ptr->partial_obj_size();
2664   HeapWord* const src_region_destination = src_region_ptr->destination();
2665 
2666   assert(dest_addr >= src_region_destination, "wrong src region");
2667   assert(src_region_ptr->data_size() > 0, "src region cannot be empty");
2668 
2669   HeapWord* const src_region_beg = sd.region_to_addr(src_region_idx);
2670   HeapWord* const src_region_end = src_region_beg + RegionSize;
2671 
2672   HeapWord* addr = src_region_beg;
2673   if (dest_addr == src_region_destination) {
2674     // Return the first live word in the source region.
2675     if (partial_obj_size == 0) {
2676       addr = bitmap->find_obj_beg(addr, src_region_end);
2677       assert(addr < src_region_end, "no objects start in src region");
2678     }
2679     return addr;
2680   }
2681 
2682   // Must skip some live data.
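       // The distance from the source region's destination to dest_addr is the
       // number of live words from this source region already copied into
       // earlier destination regions.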
2683   size_t words_to_skip = dest_addr - src_region_destination;
2684   assert(src_region_ptr->data_size() > words_to_skip, "wrong src region");
2685 
2686   if (partial_obj_size >= words_to_skip) {
2687     // All the live words to skip are part of the partial object.
2688     addr += words_to_skip;
2689     if (partial_obj_size == words_to_skip) {
2690       // Find the first live word past the partial object.
2691       addr = bitmap->find_obj_beg(addr, src_region_end);
2692       assert(addr < src_region_end, "wrong src region");
2693     }
2694     return addr;
2695   }
2696 
2697   // Skip over the partial object (if any).
2698   if (partial_obj_size != 0) {
2699     words_to_skip -= partial_obj_size;
2700     addr += partial_obj_size;
2701   }
2702 
2703   // Skip over live words due to objects that start in the region.
2704   addr = skip_live_words(addr, src_region_end, words_to_skip);
2705   assert(addr < src_region_end, "wrong src region");
2706   return addr;
2707 }
2708 
2709 void PSParallelCompact::decrement_destination_counts(ParCompactionManager* cm,
2710                                                      SpaceId src_space_id,
2711                                                      size_t beg_region,
2712                                                      HeapWord* end_addr)
2713 {
2714   ParallelCompactData& sd = summary_data();
2715 
2716 #ifdef ASSERT
2717   MutableSpace* const src_space = _space_info[src_space_id].space();
2718   HeapWord* const beg_addr = sd.region_to_addr(beg_region);
2719   assert(src_space->contains(beg_addr) || beg_addr == src_space->end(),
2720          "src_space_id does not match beg_addr");
2721   assert(src_space->contains(end_addr) || end_addr == src_space->end(),
2722          "src_space_id does not match end_addr");
2723 #endif // #ifdef ASSERT
2724 
2725   RegionData* const beg = sd.region(beg_region);
2726   RegionData* const end = sd.addr_to_region_ptr(sd.region_align_up(end_addr));
2727 
2728   // Regions up to new_top() are enqueued if they become available.
2729   HeapWord* const new_top = _space_info[src_space_id].new_top();
2730   RegionData* const enqueue_end =
2731     sd.addr_to_region_ptr(sd.region_align_up(new_top));
2732 
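       // The regions in [beg, end) served as sources for the destination just
       // filled.  When a region's destination count reaches zero, all of its
       // live data has been copied out, so the region itself can be claimed
       // and filled; claim() ensures only one worker enqueues it.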
2733   for (RegionData* cur = beg; cur < end; ++cur) {
2734     assert(cur->data_size() > 0, "region must have live data");
2735     cur->decrement_destination_count();
2736     if (cur < enqueue_end && cur->available() && cur->claim()) {
2737       cm->push_region(sd.region(cur));
2738     }
2739   }
2740 }
2741 
2742 size_t PSParallelCompact::next_src_region(MoveAndUpdateClosure& closure,
2743                                           SpaceId& src_space_id,
2744                                           HeapWord*& src_space_top,
2745                                           HeapWord* end_addr)
2746 {
2747   typedef ParallelCompactData::RegionData RegionData;
2748 
2749   ParallelCompactData& sd = PSParallelCompact::summary_data();
2750   const size_t region_size = ParallelCompactData::RegionSize;
2751 
2752   size_t src_region_idx = 0;
2753 
2754   // Skip empty regions (if any) up to the top of the space.
2755   HeapWord* const src_aligned_up = sd.region_align_up(end_addr);
2756   RegionData* src_region_ptr = sd.addr_to_region_ptr(src_aligned_up);
2757   HeapWord* const top_aligned_up = sd.region_align_up(src_space_top);
2758   const RegionData* const top_region_ptr =
2759     sd.addr_to_region_ptr(top_aligned_up);
2760   while (src_region_ptr < top_region_ptr && src_region_ptr->data_size() == 0) {
2761     ++src_region_ptr;
2762   }
2763 
2764   if (src_region_ptr < top_region_ptr) {
2765     // The next source region is in the current space.  Update src_region_idx
2766     // and the source address to match src_region_ptr.
2767     src_region_idx = sd.region(src_region_ptr);
2768     HeapWord* const src_region_addr = sd.region_to_addr(src_region_idx);
2769     if (src_region_addr > closure.source()) {
2770       closure.set_source(src_region_addr);
2771     }
2772     return src_region_idx;
2773   }
2774 
2775   // Switch to a new source space and find the first non-empty region.
2776   unsigned int space_id = src_space_id + 1;
2777   assert(space_id < last_space_id, "not enough spaces");
2778 
2779   HeapWord* const destination = closure.destination();
2780 
2781   do {
2782     MutableSpace* space = _space_info[space_id].space();
2783     HeapWord* const bottom = space->bottom();
2784     const RegionData* const bottom_cp = sd.addr_to_region_ptr(bottom);
2785 
2786     // Iterate over the spaces that do not compact into themselves.
2787     if (bottom_cp->destination() != bottom) {
2788       HeapWord* const top_aligned_up = sd.region_align_up(space->top());
2789       const RegionData* const top_cp = sd.addr_to_region_ptr(top_aligned_up);
2790 
2791       for (const RegionData* src_cp = bottom_cp; src_cp < top_cp; ++src_cp) {
2792         if (src_cp->live_obj_size() > 0) {
2793           // Found it.
2794           assert(src_cp->destination() == destination,
2795                  "first live obj in the space must match the destination");
2796           assert(src_cp->partial_obj_size() == 0,
2797                  "a space cannot begin with a partial obj");
2798 
2799           src_space_id = SpaceId(space_id);
2800           src_space_top = space->top();
2801           const size_t src_region_idx = sd.region(src_cp);
2802           closure.set_source(sd.region_to_addr(src_region_idx));
2803           return src_region_idx;
2804         } else {
2805           assert(src_cp->data_size() == 0, "sanity");
2806         }
2807       }
2808     }
2809   } while (++space_id < last_space_id);
2810 
2811   assert(false, "no source region was found");
2812   return 0;
2813 }
2814 
2815 void PSParallelCompact::fill_region(ParCompactionManager* cm, size_t region_idx)
2816 {
2817   typedef ParMarkBitMap::IterationStatus IterationStatus;
2818   const size_t RegionSize = ParallelCompactData::RegionSize;
2819   ParMarkBitMap* const bitmap = mark_bitmap();
2820   ParallelCompactData& sd = summary_data();
2821   RegionData* const region_ptr = sd.region(region_idx);
2822 
2823   // Get the items needed to construct the closure.
2824   HeapWord* dest_addr = sd.region_to_addr(region_idx);
2825   SpaceId dest_space_id = space_id(dest_addr);
2826   ObjectStartArray* start_array = _space_info[dest_space_id].start_array();
2827   HeapWord* new_top = _space_info[dest_space_id].new_top();
2828   assert(dest_addr < new_top, "sanity");
2829   const size_t words = MIN2(pointer_delta(new_top, dest_addr), RegionSize);
2830 
2831   // Get the source region and related info.
2832   size_t src_region_idx = region_ptr->source_region();
2833   SpaceId src_space_id = space_id(sd.region_to_addr(src_region_idx));
2834   HeapWord* src_space_top = _space_info[src_space_id].space()->top();
2835 
2836   MoveAndUpdateClosure closure(bitmap, cm, start_array, dest_addr, words);
2837   closure.set_source(first_src_addr(dest_addr, src_space_id, src_region_idx));
2838 
2839   // Adjust src_region_idx to prepare for decrementing destination counts (the
2840   // destination count is not decremented when a region is copied to itself).
2841   if (src_region_idx == region_idx) {
2842     src_region_idx += 1;
2843   }
2844 
2845   if (bitmap->is_unmarked(closure.source())) {
2846     // The first source word is in the middle of an object; copy the remainder
2847     // of the object or as much as will fit.  The fact that pointer updates were
2848     // deferred will be noted when the object header is processed.
2849     HeapWord* const old_src_addr = closure.source();
2850     closure.copy_partial_obj();
2851     if (closure.is_full()) {
2852       decrement_destination_counts(cm, src_space_id, src_region_idx,
2853                                    closure.source());
2854       region_ptr->set_deferred_obj_addr(NULL);
2855       region_ptr->set_completed();
2856       return;
2857     }
2858 
2859     HeapWord* const end_addr = sd.region_align_down(closure.source());
2860     if (sd.region_align_down(old_src_addr) != end_addr) {
2861       // The partial object was copied from more than one source region.
2862       decrement_destination_counts(cm, src_space_id, src_region_idx, end_addr);
2863 
2864       // Move to the next source region, possibly switching spaces as well.  All
2865       // args except end_addr may be modified.
2866       src_region_idx = next_src_region(closure, src_space_id, src_space_top,
2867                                        end_addr);
2868     }
2869   }
2870 
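       // Copy objects into the destination region, advancing through source
       // regions (and source spaces) as they are exhausted, until the
       // destination is full.  An object that does not fit is copied only
       // partially and its pointer updates are deferred.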
2871   do {
2872     HeapWord* const cur_addr = closure.source();
2873     HeapWord* const end_addr = MIN2(sd.region_align_up(cur_addr + 1),
2874                                     src_space_top);
2875     IterationStatus status = bitmap->iterate(&closure, cur_addr, end_addr);
2876 
2877     if (status == ParMarkBitMap::incomplete) {
2878       // The last obj that starts in the source region does not end in the
2879       // region.
2880       assert(closure.source() < end_addr, "sanity");
2881       HeapWord* const obj_beg = closure.source();
2882       HeapWord* const range_end = MIN2(obj_beg + closure.words_remaining(),
2883                                        src_space_top);
2884       HeapWord* const obj_end = bitmap->find_obj_end(obj_beg, range_end);
2885       if (obj_end < range_end) {
2886         // The end was found; the entire object will fit.
2887         status = closure.do_addr(obj_beg, bitmap->obj_size(obj_beg, obj_end));
2888         assert(status != ParMarkBitMap::would_overflow, "sanity");
2889       } else {
2890         // The end was not found; the object will not fit.
2891         assert(range_end < src_space_top, "obj cannot cross space boundary");
2892         status = ParMarkBitMap::would_overflow;
2893       }
2894     }
2895 
2896     if (status == ParMarkBitMap::would_overflow) {
2897       // The last object did not fit.  Note that interior oop updates were
2898       // deferred, then copy enough of the object to fill the region.
2899       region_ptr->set_deferred_obj_addr(closure.destination());
2900       status = closure.copy_until_full(); // copies from closure.source()
2901 
2902       decrement_destination_counts(cm, src_space_id, src_region_idx,
2903                                    closure.source());
2904       region_ptr->set_completed();
2905       return;
2906     }
2907 
2908     if (status == ParMarkBitMap::full) {
2909       decrement_destination_counts(cm, src_space_id, src_region_idx,
2910                                    closure.source());
2911       region_ptr->set_deferred_obj_addr(NULL);
2912       region_ptr->set_completed();
2913       return;
2914     }
2915 
2916     decrement_destination_counts(cm, src_space_id, src_region_idx, end_addr);
2917 
2918     // Move to the next source region, possibly switching spaces as well.  All
2919     // args except end_addr may be modified.
2920     src_region_idx = next_src_region(closure, src_space_id, src_space_top,
2921                                      end_addr);
2922   } while (true);
2923 }
2924 
2925 void PSParallelCompact::fill_blocks(size_t region_idx)
2926 {
2927   // Fill in the block table elements for the specified region.  Each block
2928   // table element holds the number of live words in the region that are to the
2929   // left of the first object that starts in the block.  Thus only blocks in
2930   // which an object starts need to be filled.
2931   //
2932   // The algorithm scans the section of the bitmap that corresponds to the
2933   // region, keeping a running total of the live words.  When an object start is
2934   // found, if it's the first to start in the block that contains it, the
2935   // current total is written to the block table element.
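       //
       // Illustrative example (assuming 128-word blocks): an object starting
       // at word 300 of the region lies in block 2 (words 256..383).  If it is
       // the first object to start in that block, the live-word total
       // accumulated up to that point is recorded as block 2's offset.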
2936   const size_t Log2BlockSize = ParallelCompactData::Log2BlockSize;
2937   const size_t Log2RegionSize = ParallelCompactData::Log2RegionSize;
2938   const size_t RegionSize = ParallelCompactData::RegionSize;
2939 
2940   ParallelCompactData& sd = summary_data();
2941   const size_t partial_obj_size = sd.region(region_idx)->partial_obj_size();
2942   if (partial_obj_size >= RegionSize) {
2943     return; // No objects start in this region.
2944   }
2945 
2946   // Ensure the first loop iteration decides that the block has changed.
2947   size_t cur_block = sd.block_count();
2948 
2949   const ParMarkBitMap* const bitmap = mark_bitmap();
2950 
2951   const size_t Log2BitsPerBlock = Log2BlockSize - LogMinObjAlignment;
2952   assert((size_t)1 << Log2BitsPerBlock ==
2953          bitmap->words_to_bits(ParallelCompactData::BlockSize), "sanity");
2954 
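       // beg_bit and range_end delimit the region in bitmap coordinates.
       // Scanning starts just past the partial object entering the region;
       // its live words are counted in the initial value of live_bits.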
2955   size_t beg_bit = bitmap->words_to_bits(region_idx << Log2RegionSize);
2956   const size_t range_end = beg_bit + bitmap->words_to_bits(RegionSize);
2957   size_t live_bits = bitmap->words_to_bits(partial_obj_size);
2958   beg_bit = bitmap->find_obj_beg(beg_bit + live_bits, range_end);
2959   while (beg_bit < range_end) {
2960     const size_t new_block = beg_bit >> Log2BitsPerBlock;
2961     if (new_block != cur_block) {
2962       cur_block = new_block;
2963       sd.block(cur_block)->set_offset(bitmap->bits_to_words(live_bits));
2964     }
2965 
2966     const size_t end_bit = bitmap->find_obj_end(beg_bit, range_end);
2967     if (end_bit < range_end - 1) {
2968       live_bits += end_bit - beg_bit + 1;
2969       beg_bit = bitmap->find_obj_beg(end_bit + 1, range_end);
2970     } else {
2971       return;
2972     }
2973   }
2974 }
2975 
2976 void
2977 PSParallelCompact::move_and_update(ParCompactionManager* cm, SpaceId space_id) {
2978   const MutableSpace* sp = space(space_id);
2979   if (sp->is_empty()) {
2980     return;
2981   }
2982 
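       // Update pointers in the dense prefix first (objects there do not
       // move), then move and update the objects above the dense prefix into
       // their new locations.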
2983   ParallelCompactData& sd = PSParallelCompact::summary_data();
2984   ParMarkBitMap* const bitmap = mark_bitmap();
2985   HeapWord* const dp_addr = dense_prefix(space_id);
2986   HeapWord* beg_addr = sp->bottom();
2987   HeapWord* end_addr = sp->top();
2988 
2989   assert(beg_addr <= dp_addr && dp_addr <= end_addr, "bad dense prefix");
2990 
2991   const size_t beg_region = sd.addr_to_region_idx(beg_addr);
2992   const size_t dp_region = sd.addr_to_region_idx(dp_addr);
2993   if (beg_region < dp_region) {
2994     update_and_deadwood_in_dense_prefix(cm, space_id, beg_region, dp_region);
2995   }
2996 
2997   // The destination of the first live object that starts in the region is one
2998   // past the end of the partial object entering the region (if any).
2999   HeapWord* const dest_addr = sd.partial_obj_end(dp_region);
3000   HeapWord* const new_top = _space_info[space_id].new_top();
3001   assert(new_top >= dest_addr, "bad new_top value");
3002   const size_t words = pointer_delta(new_top, dest_addr);
3003 
3004   if (words > 0) {
3005     ObjectStartArray* start_array = _space_info[space_id].start_array();
3006     MoveAndUpdateClosure closure(bitmap, cm, start_array, dest_addr, words);
3007 
3008     ParMarkBitMap::IterationStatus status;
3009     status = bitmap->iterate(&closure, dest_addr, end_addr);
3010     assert(status == ParMarkBitMap::full, "iteration not complete");
3011     assert(bitmap->find_obj_beg(closure.source(), end_addr) == end_addr,
3012            "live objects skipped because closure is full");
3013   }
3014 }
3015 
3016 jlong PSParallelCompact::millis_since_last_gc() {
3017   // We need a monotonically non-decreasing time in ms but
3018   // os::javaTimeMillis() does not guarantee monotonicity.
3019   jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
3020   jlong ret_val = now - _time_of_last_gc;
3021   // XXX See note in genCollectedHeap::millis_since_last_gc().
3022   if (ret_val < 0) {
3023     NOT_PRODUCT(warning("time warp: " JLONG_FORMAT, ret_val);)
3024     return 0;
3025   }
3026   return ret_val;
3027 }
3028 
3029 void PSParallelCompact::reset_millis_since_last_gc() {
3030   // We need a monotonically non-decreasing time in ms but
3031   // os::javaTimeMillis() does not guarantee monotonicity.
3032   _time_of_last_gc = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
3033 }
3034 
3035 ParMarkBitMap::IterationStatus MoveAndUpdateClosure::copy_until_full()
3036 {
3037   if (source() != destination()) {
3038     DEBUG_ONLY(PSParallelCompact::check_new_location(source(), destination());)
3039     Copy::aligned_conjoint_words(source(), destination(), words_remaining());
3040   }
3041   update_state(words_remaining());
3042   assert(is_full(), "sanity");
3043   return ParMarkBitMap::full;
3044 }
3045 
3046 void MoveAndUpdateClosure::copy_partial_obj()
3047 {
3048   size_t words = words_remaining();
3049 
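       // Copy either the remainder of the partial object or as much of it as
       // fits in the destination, whichever is smaller.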
3050   HeapWord* const range_end = MIN2(source() + words, bitmap()->region_end());
3051   HeapWord* const end_addr = bitmap()->find_obj_end(source(), range_end);
3052   if (end_addr < range_end) {
3053     words = bitmap()->obj_size(source(), end_addr);
3054   }
3055 
3056   // This test is necessary; if omitted, the pointer updates to a partial object
3057   // that crosses the dense prefix boundary could be overwritten.
3058   if (source() != destination()) {
3059     DEBUG_ONLY(PSParallelCompact::check_new_location(source(), destination());)
3060     Copy::aligned_conjoint_words(source(), destination(), words);
3061   }
3062   update_state(words);
3063 }
3064 
3065 void InstanceKlass::oop_pc_update_pointers(oop obj) {
3066   oop_oop_iterate_oop_maps<true>(obj, PSParallelCompact::adjust_pointer_closure());
3067 }
3068 
3069 void InstanceMirrorKlass::oop_pc_update_pointers(oop obj) {
3070   InstanceKlass::oop_pc_update_pointers(obj);
3071 
3072   oop_oop_iterate_statics<true>(obj, PSParallelCompact::adjust_pointer_closure());
3073 }
3074 
3075 void InstanceClassLoaderKlass::oop_pc_update_pointers(oop obj) {
3076   InstanceKlass::oop_pc_update_pointers(obj);
3077 }
3078 
3079 #ifdef ASSERT
3080 template <class T> static void trace_reference_gc(const char *s, oop obj,
3081                                                   T* referent_addr,
3082                                                   T* next_addr,
3083                                                   T* discovered_addr) {
3084   log_develop_trace(gc, ref)("%s obj " PTR_FORMAT, s, p2i(obj));
3085   log_develop_trace(gc, ref)("     referent_addr/* " PTR_FORMAT " / " PTR_FORMAT,
3086                              p2i(referent_addr), referent_addr ? p2i(oopDesc::load_decode_heap_oop(referent_addr)) : NULL);
3087   log_develop_trace(gc, ref)("     next_addr/* " PTR_FORMAT " / " PTR_FORMAT,
3088                              p2i(next_addr), next_addr ? p2i(oopDesc::load_decode_heap_oop(next_addr)) : NULL);
3089   log_develop_trace(gc, ref)("     discovered_addr/* " PTR_FORMAT " / " PTR_FORMAT,
3090                              p2i(discovered_addr), discovered_addr ? p2i(oopDesc::load_decode_heap_oop(discovered_addr)) : NULL);
3091 }
3092 #endif
3093 
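     // Adjust the referent, next and discovered fields of a
     // java.lang.ref.Reference; they are handled explicitly here for both
     // compressed and uncompressed oops.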
3094 template <class T>
3095 static void oop_pc_update_pointers_specialized(oop obj) {
3096   T* referent_addr = (T*)java_lang_ref_Reference::referent_addr(obj);
3097   PSParallelCompact::adjust_pointer(referent_addr);
3098   T* next_addr = (T*)java_lang_ref_Reference::next_addr(obj);
3099   PSParallelCompact::adjust_pointer(next_addr);
3100   T* discovered_addr = (T*)java_lang_ref_Reference::discovered_addr(obj);
3101   PSParallelCompact::adjust_pointer(discovered_addr);
3102   debug_only(trace_reference_gc("InstanceRefKlass::oop_update_ptrs", obj,
3103                                 referent_addr, next_addr, discovered_addr);)
3104 }
3105 
3106 void InstanceRefKlass::oop_pc_update_pointers(oop obj) {
3107   InstanceKlass::oop_pc_update_pointers(obj);
3108 
3109   if (UseCompressedOops) {
3110     oop_pc_update_pointers_specialized<narrowOop>(obj);
3111   } else {
3112     oop_pc_update_pointers_specialized<oop>(obj);
3113   }
3114 }
3115 
3116 void ObjArrayKlass::oop_pc_update_pointers(oop obj) {
3117   assert(obj->is_objArray(), "obj must be obj array");
3118   oop_oop_iterate_elements<true>(objArrayOop(obj), PSParallelCompact::adjust_pointer_closure());
3119 }
3120 
3121 void TypeArrayKlass::oop_pc_update_pointers(oop obj) {
3122   assert(obj->is_typeArray(), "must be a type array");
3123 }
3124 
3125 ParMarkBitMapClosure::IterationStatus
3126 MoveAndUpdateClosure::do_addr(HeapWord* addr, size_t words) {
3127   assert(destination() != NULL, "sanity");
3128   assert(bitmap()->obj_size(addr) == words, "bad size");
3129 
3130   _source = addr;
3131   assert(PSParallelCompact::summary_data().calc_new_pointer(source()) ==
3132          destination(), "wrong destination");
3133 
3134   if (words > words_remaining()) {
3135     return ParMarkBitMap::would_overflow;
3136   }
3137 
3138   // The start_array must be updated even if the object is not moving.
3139   if (_start_array != NULL) {
3140     _start_array->allocate_block(destination());
3141   }
3142 
3143   if (destination() != source()) {
3144     DEBUG_ONLY(PSParallelCompact::check_new_location(source(), destination());)
3145     Copy::aligned_conjoint_words(source(), destination(), words);
3146   }
3147 
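       // The object now lives at destination(); update its interior oops there.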
3148   oop moved_oop = (oop) destination();
3149   compaction_manager()->update_contents(moved_oop);
3150   assert(moved_oop->is_oop_or_null(), "Expected an oop or NULL at " PTR_FORMAT, p2i(moved_oop));
3151 
3152   update_state(words);
3153   assert(destination() == (HeapWord*)moved_oop + moved_oop->size(), "sanity");
3154   return is_full() ? ParMarkBitMap::full : ParMarkBitMap::incomplete;
3155 }
3156 
3157 UpdateOnlyClosure::UpdateOnlyClosure(ParMarkBitMap* mbm,
3158                                      ParCompactionManager* cm,
3159                                      PSParallelCompact::SpaceId space_id) :
3160   ParMarkBitMapClosure(mbm, cm),
3161   _space_id(space_id),
3162   _start_array(PSParallelCompact::start_array(space_id))
3163 {
3164 }
3165 
3166 // Updates the references in the object to their new values.
3167 ParMarkBitMapClosure::IterationStatus
3168 UpdateOnlyClosure::do_addr(HeapWord* addr, size_t words) {
3169   do_addr(addr);
3170   return ParMarkBitMap::incomplete;
3171 }
--- EOF ---