rev 4535 : 6725714: par compact - add a table to speed up bitmap searches
Reviewed-by: jmasa, tschatzl
--- old/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp
+++ new/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp
1 1 /*
2 2 * Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved.
3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 4 *
5 5 * This code is free software; you can redistribute it and/or modify it
6 6 * under the terms of the GNU General Public License version 2 only, as
7 7 * published by the Free Software Foundation.
8 8 *
9 9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 12 * version 2 for more details (a copy is included in the LICENSE file that
13 13 * accompanied this code).
14 14 *
15 15 * You should have received a copy of the GNU General Public License version
16 16 * 2 along with this work; if not, write to the Free Software Foundation,
17 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 18 *
19 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 20 * or visit www.oracle.com if you need additional information or have any
21 21 * questions.
22 22 *
23 23 */
24 24
25 25 #ifndef SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSPARALLELCOMPACT_HPP
26 26 #define SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSPARALLELCOMPACT_HPP
27 27
28 28 #include "gc_implementation/parallelScavenge/objectStartArray.hpp"
29 29 #include "gc_implementation/parallelScavenge/parMarkBitMap.hpp"
30 30 #include "gc_implementation/parallelScavenge/psCompactionManager.hpp"
31 31 #include "gc_implementation/shared/collectorCounters.hpp"
32 32 #include "gc_implementation/shared/markSweep.hpp"
33 33 #include "gc_implementation/shared/mutableSpace.hpp"
34 34 #include "memory/sharedHeap.hpp"
35 35 #include "oops/oop.hpp"
36 36
37 37 class ParallelScavengeHeap;
38 38 class PSAdaptiveSizePolicy;
39 39 class PSYoungGen;
40 40 class PSOldGen;
41 41 class PSPermGen;
42 42 class ParCompactionManager;
43 43 class ParallelTaskTerminator;
44 44 class PSParallelCompact;
45 45 class GCTaskManager;
46 46 class GCTaskQueue;
47 47 class PreGCValues;
48 48 class MoveAndUpdateClosure;
49 49 class RefProcTaskExecutor;
50 50 class ParallelOldTracer;
51 51 class STWGCTimer;
52 52
53 53 // The SplitInfo class holds the information needed to 'split' a source region
54 54 // so that the live data can be copied to two destination *spaces*. Normally,
55 55 // all the live data in a region is copied to a single destination space (e.g.,
56 56 // everything live in a region in eden is copied entirely into the old gen).
57 57 // However, when the heap is nearly full, all the live data in eden may not fit
58 58 // into the old gen. Copying only some of the regions from eden to old gen
59 59 // requires finding a region that does not contain a partial object (i.e., no
60 60 // live object crosses the region boundary) somewhere near the last object that
61 61 // does fit into the old gen. Since it's not always possible to find such a
62 62 // region, splitting is necessary for predictable behavior.
63 63 //
64 64 // A region is always split at the end of the partial object. This avoids
65 65 // additional tests when calculating the new location of a pointer, which is a
66 66 // very hot code path. The partial object and everything to its left will be
67 67 // copied to another space (call it dest_space_1). The live data to the right
68 68 // of the partial object will be copied either within the space itself, or to a
69 69 // different destination space (distinct from dest_space_1).
70 70 //
71 71 // Split points are identified during the summary phase, when region
72 72 // destinations are computed: data about the split, including the
73 73 // partial_object_size, is recorded in a SplitInfo record and the
74 74 // partial_object_size field in the summary data is set to zero. The zeroing is
75 75 // possible (and necessary) since the partial object will move to a different
76 76 // destination space than anything to its right, thus the partial object should
77 77 // not affect the locations of any objects to its right.
78 78 //
79 79 // The recorded data is used during the compaction phase, but only rarely: when
80 80 // the partial object on the split region will be copied across a destination
81 81 // region boundary. This test is made once each time a region is filled, and is
82 82 // a simple address comparison, so the overhead is negligible (see
83 83 // PSParallelCompact::first_src_addr()).
84 84 //
85 85 // Notes:
86 86 //
87 87 // Only regions with partial objects are split; a region without a partial
88 88 // object does not need any extra bookkeeping.
89 89 //
90 90 // At most one region is split per space, so the amount of data required is
91 91 // constant.
92 92 //
93 93 // A region is split only when the destination space would overflow. Once that
94 94 // happens, the destination space is abandoned and no other data (even from
95 95 // other source spaces) is targeted to that destination space. Abandoning the
96 96 // destination space may leave a somewhat large unused area at the end, if a
97 97 // large object caused the overflow.
98 98 //
99 99 // Future work:
100 100 //
101 101 // More bookkeeping would be required to continue to use the destination space.
102 102 // The most general solution would allow data from regions in two different
103 103 // source spaces to be "joined" in a single destination region. At the very
104 104 // least, additional code would be required in next_src_region() to detect the
105 105 // join and skip to an out-of-order source region. If the join region was also
106 106 // the last destination region to which a split region was copied (the most
107 107 // likely case), then additional work would be needed to get fill_region() to
108 108 // stop iteration and switch to a new source region at the right point. The
109 109 // basic idea would be to use a fake value for the top of the source space. It
110 110 // is doable, if a bit tricky.
111 111 //
112 112 // A simpler (but less general) solution would fill the remainder of the
113 113 // destination region with a dummy object and continue filling the next
114 114 // destination region.
115 115
116 116 class SplitInfo
117 117 {
118 118 public:
119 119 // Return true if this split info is valid (i.e., if a split has been
120 120 // recorded). The very first region cannot have a partial object and thus is
121 121 // never split, so 0 is the 'invalid' value.
122 122 bool is_valid() const { return _src_region_idx > 0; }
123 123
124 124 // Return true if this split holds data for the specified source region.
125 125 inline bool is_split(size_t source_region) const;
126 126
127 127 // The index of the split region, the size of the partial object on that
128 128 // region and the destination of the partial object.
129 129 size_t src_region_idx() const { return _src_region_idx; }
130 130 size_t partial_obj_size() const { return _partial_obj_size; }
131 131 HeapWord* destination() const { return _destination; }
132 132
133 133 // The destination count of the partial object referenced by this split
134 134 // (either 1 or 2). This must be added to the destination count of the
135 135 // remainder of the source region.
136 136 unsigned int destination_count() const { return _destination_count; }
137 137
138 138 // If a word within the partial object will be written to the first word of a
139 139 // destination region, this is the address of the destination region;
140 140 // otherwise this is NULL.
141 141 HeapWord* dest_region_addr() const { return _dest_region_addr; }
142 142
143 143 // If a word within the partial object will be written to the first word of a
144 144 // destination region, this is the address of that word within the partial
145 145 // object; otherwise this is NULL.
146 146 HeapWord* first_src_addr() const { return _first_src_addr; }
147 147
148 148 // Record the data necessary to split the region src_region_idx.
149 149 void record(size_t src_region_idx, size_t partial_obj_size,
150 150 HeapWord* destination);
151 151
152 152 void clear();
153 153
154 154 DEBUG_ONLY(void verify_clear();)
155 155
156 156 private:
157 157 size_t _src_region_idx;
158 158 size_t _partial_obj_size;
159 159 HeapWord* _destination;
160 160 unsigned int _destination_count;
161 161 HeapWord* _dest_region_addr;
162 162 HeapWord* _first_src_addr;
163 163 };
164 164
165 165 inline bool SplitInfo::is_split(size_t region_idx) const
166 166 {
167 167 return _src_region_idx == region_idx && is_valid();
168 168 }
169 169
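A hedged usage sketch of the protocol described above (illustrative names, not the exact call sites; the real callers are summarize_split_space() and PSParallelCompact::first_src_addr() in psParallelCompact.cpp):

    // Summary phase: at most one split is recorded per source space.
    SplitInfo& split_info = space_info->split_info();
    split_info.record(src_region_idx, partial_obj_size, destination);

    // Compaction phase: when filling a region, a cheap check determines
    // whether the source region is the split region.
    if (split_info.is_split(src_region_idx)) {
      // Copy the partial object using split_info.destination(),
      // split_info.first_src_addr(), etc.
    }
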
170 170 class SpaceInfo
171 171 {
172 172 public:
173 173 MutableSpace* space() const { return _space; }
174 174
175 175 // Where the free space will start after the collection. Valid only after the
176 176 // summary phase completes.
177 177 HeapWord* new_top() const { return _new_top; }
178 178
179 179 // Allows new_top to be set.
180 180 HeapWord** new_top_addr() { return &_new_top; }
181 181
182 182 // Where the smallest allowable dense prefix ends (used only for perm gen).
183 183 HeapWord* min_dense_prefix() const { return _min_dense_prefix; }
184 184
185 185 // Where the dense prefix ends, or the compacted region begins.
186 186 HeapWord* dense_prefix() const { return _dense_prefix; }
187 187
188 188 // The start array for the (generation containing the) space, or NULL if there
189 189 // is no start array.
190 190 ObjectStartArray* start_array() const { return _start_array; }
191 191
192 192 SplitInfo& split_info() { return _split_info; }
193 193
194 194 void set_space(MutableSpace* s) { _space = s; }
195 195 void set_new_top(HeapWord* addr) { _new_top = addr; }
196 196 void set_min_dense_prefix(HeapWord* addr) { _min_dense_prefix = addr; }
197 197 void set_dense_prefix(HeapWord* addr) { _dense_prefix = addr; }
198 198 void set_start_array(ObjectStartArray* s) { _start_array = s; }
199 199
200 200 void publish_new_top() const { _space->set_top(_new_top); }
201 201
202 202 private:
203 203 MutableSpace* _space;
204 204 HeapWord* _new_top;
205 205 HeapWord* _min_dense_prefix;
206 206 HeapWord* _dense_prefix;
207 207 ObjectStartArray* _start_array;
208 208 SplitInfo _split_info;
209 209 };
210 210
211 211 class ParallelCompactData
212 212 {
213 213 public:
214 214 // Sizes are in HeapWords, unless indicated otherwise.
215 215 static const size_t Log2RegionSize;
216 216 static const size_t RegionSize;
217 217 static const size_t RegionSizeBytes;
218 218
219 219 // Mask for the bits in a size_t to get an offset within a region.
220 220 static const size_t RegionSizeOffsetMask;
221 221 // Mask for the bits in a pointer to get an offset within a region.
222 222 static const size_t RegionAddrOffsetMask;
223 223 // Mask for the bits in a pointer to get the address of the start of a region.
224 224 static const size_t RegionAddrMask;
225 225
226 + static const size_t Log2BlockSize;
227 + static const size_t BlockSize;
228 + static const size_t BlockSizeBytes;
229 +
230 + static const size_t BlockSizeOffsetMask;
231 + static const size_t BlockAddrOffsetMask;
232 + static const size_t BlockAddrMask;
233 +
234 + static const size_t BlocksPerRegion;
235 + static const size_t Log2BlocksPerRegion;
236 +
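The block constants are defined in psParallelCompact.cpp. A minimal sketch of how they would typically relate, by analogy with the region constants above (assumed relationships, not quoted from the change):

    BlockSize           = (size_t)1 << Log2BlockSize;  // in HeapWords
    BlockSizeBytes      = BlockSize << LogHeapWordSize;
    BlockSizeOffsetMask = BlockSize - 1;               // word offset within a block
    BlockAddrOffsetMask = BlockSizeBytes - 1;          // byte offset within a block
    BlockAddrMask       = ~BlockAddrOffsetMask;        // block start address bits
    BlocksPerRegion     = RegionSize >> Log2BlockSize; // == 1 << Log2BlocksPerRegion
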
226 237 class RegionData
227 238 {
228 239 public:
229 240 // Destination address of the region.
230 241 HeapWord* destination() const { return _destination; }
231 242
232 243 // The first region containing data destined for this region.
233 244 size_t source_region() const { return _source_region; }
234 245
235 246 // The object (if any) starting in this region and ending in a different
236 247 // region that could not be updated during the main (parallel) compaction
237 248 // phase. This is different from _partial_obj_addr, which is an object that
238 249 // extends onto a source region. However, the two uses do not overlap in
239 250 // time, so the same field is used to save space.
240 251 HeapWord* deferred_obj_addr() const { return _partial_obj_addr; }
241 252
242 253 // The starting address of the partial object extending onto the region.
243 254 HeapWord* partial_obj_addr() const { return _partial_obj_addr; }
244 255
245 256 // Size of the partial object extending onto the region (words).
246 257 size_t partial_obj_size() const { return _partial_obj_size; }
247 258
248 259 // Size of live data that lies within this region due to objects that start
249 260 // in this region (words). This does not include the partial object
250 261 // extending onto the region (if any), or the part of an object that extends
251 262 // onto the next region (if any).
252 263 size_t live_obj_size() const { return _dc_and_los & los_mask; }
253 264
254 265 // Total live data that lies within the region (words).
255 266 size_t data_size() const { return partial_obj_size() + live_obj_size(); }
256 267
257 268 // The destination_count is the number of other regions to which data from
258 269 // this region will be copied. At the end of the summary phase, the valid
259 270 // values of destination_count are
260 271 //
261 272 // 0 - data from the region will be compacted completely into itself, or the
262 273 // region is empty. The region can be claimed and then filled.
263 274 // 1 - data from the region will be compacted into 1 other region; some
264 275 // data from the region may also be compacted into the region itself.
265 276 // 2 - data from the region will be copied to 2 other regions.
266 277 //
267 278 // During compaction as regions are emptied, the destination_count is
268 279 // decremented (atomically) and when it reaches 0, it can be claimed and
269 280 // then filled.
270 281 //
271 282 // A region is claimed for processing by atomically changing the
272 283 // destination_count to the claimed value (dc_claimed). After a region has
273 284 // been filled, the destination_count should be set to the completed value
274 285 // (dc_completed).
275 286 inline uint destination_count() const;
276 287 inline uint destination_count_raw() const;
277 288
289 + // Whether the block table for this region has been filled.
290 + inline bool blocks_filled() const;
291 +
292 + // Number of times the block table was filled.
293 + DEBUG_ONLY(inline size_t blocks_filled_count() const;)
294 +
278 295 // The location of the java heap data that corresponds to this region.
279 296 inline HeapWord* data_location() const;
280 297
281 298 // The highest address referenced by objects in this region.
282 299 inline HeapWord* highest_ref() const;
283 300
284 301 // Whether this region is available to be claimed, has been claimed, or has
285 302 // been completed.
286 303 //
287 304 // Minor subtlety: claimed() returns true if the region is marked
288 305 // completed(), which is desirable since a region must be claimed before it
289 306 // can be completed.
290 307 bool available() const { return _dc_and_los < dc_one; }
291 308 bool claimed() const { return _dc_and_los >= dc_claimed; }
292 309 bool completed() const { return _dc_and_los >= dc_completed; }
293 310
294 311 // These are not atomic.
295 312 void set_destination(HeapWord* addr) { _destination = addr; }
296 313 void set_source_region(size_t region) { _source_region = region; }
297 314 void set_deferred_obj_addr(HeapWord* addr) { _partial_obj_addr = addr; }
298 315 void set_partial_obj_addr(HeapWord* addr) { _partial_obj_addr = addr; }
299 316 void set_partial_obj_size(size_t words) {
300 317 _partial_obj_size = (region_sz_t) words;
301 318 }
319 + inline void set_blocks_filled();
302 320
303 321 inline void set_destination_count(uint count);
304 322 inline void set_live_obj_size(size_t words);
305 323 inline void set_data_location(HeapWord* addr);
306 324 inline void set_completed();
307 325 inline bool claim_unsafe();
308 326
309 327 // These are atomic.
310 328 inline void add_live_obj(size_t words);
311 329 inline void set_highest_ref(HeapWord* addr);
312 330 inline void decrement_destination_count();
313 331 inline bool claim();
314 332
315 333 private:
316 334 // The type used to represent object sizes within a region.
317 335 typedef uint region_sz_t;
318 336
319 337 // Constants for manipulating the _dc_and_los field, which holds both the
320 338 // destination count and live obj size. The live obj size lives at the
321 339 // least significant end so no masking is necessary when adding.
322 340 static const region_sz_t dc_shift; // Shift amount.
323 341 static const region_sz_t dc_mask; // Mask for destination count.
324 342 static const region_sz_t dc_one; // 1, shifted appropriately.
325 343 static const region_sz_t dc_claimed; // Region has been claimed.
326 344 static const region_sz_t dc_completed; // Region has been completed.
327 345 static const region_sz_t los_mask; // Mask for live obj size.
328 346
329 347 HeapWord* _destination;
330 348 size_t _source_region;
331 349 HeapWord* _partial_obj_addr;
332 350 region_sz_t _partial_obj_size;
333 351 region_sz_t volatile _dc_and_los;
352 + bool _blocks_filled;
353 +
334 354 #ifdef ASSERT
355 + size_t _blocks_filled_count; // Number of block table fills.
356 +
335 357 // These enable optimizations that are only partially implemented. Use
336 358 // debug builds to prevent the code fragments from breaking.
337 359 HeapWord* _data_location;
338 360 HeapWord* _highest_ref;
339 361 #endif // #ifdef ASSERT
340 362
341 363 #ifdef ASSERT
342 364 public:
343 - uint _pushed; // 0 until region is pushed onto a worker's stack
365 + uint _pushed; // 0 until region is pushed onto a stack
344 366 private:
345 367 #endif
346 368 };
347 369
370 + // "Blocks" allow shorter sections of the bitmap to be searched. Each Block
371 + // holds an offset, which is the amount of live data in the Region to the left
372 + // of the first live object that starts in the Block.
373 + class BlockData
374 + {
375 + public:
376 + typedef unsigned short int blk_ofs_t;
377 +
378 + blk_ofs_t offset() const { return _offset; }
379 + void set_offset(size_t val) { _offset = (blk_ofs_t)val; }
380 +
381 + private:
382 + blk_ofs_t _offset;
383 + };
384 +
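The point of the table: computing an object's new location requires counting live words in the mark bitmap to the object's left, and BlockData::offset() lets that search start at the enclosing block rather than at the start of the region. A hedged sketch of the lookup (the real code is calc_new_pointer() in psParallelCompact.cpp, which also handles partial objects and fully live regions, ignored here):

    HeapWord* new_location_sketch(const ParallelCompactData& sd, HeapWord* addr) {
      const ParallelCompactData::RegionData* region = sd.addr_to_region_ptr(addr);
      const ParallelCompactData::BlockData*  block  = sd.addr_to_block_ptr(addr);
      HeapWord* result = region->destination() + block->offset();
      // Only the bitmap from the block start up to addr is searched.
      result += PSParallelCompact::mark_bitmap()->
          live_words_in_range(sd.block_align_down(addr), (oop)addr);
      return result;
    }
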
348 385 public:
349 386 ParallelCompactData();
350 387 bool initialize(MemRegion covered_region);
351 388
352 389 size_t region_count() const { return _region_count; }
353 390
354 391 // Convert region indices to/from RegionData pointers.
355 392 inline RegionData* region(size_t region_idx) const;
356 393 inline size_t region(const RegionData* const region_ptr) const;
357 394
358 - // Returns true if the given address is contained within the region
359 - bool region_contains(size_t region_index, HeapWord* addr);
395 + size_t block_count() const { return _block_count; }
396 + inline BlockData* block(size_t block_idx) const;
397 + inline size_t block(const BlockData* block_ptr) const;
360 398
361 399 void add_obj(HeapWord* addr, size_t len);
362 400 void add_obj(oop p, size_t len) { add_obj((HeapWord*)p, len); }
363 401
364 402 // Fill in the regions covering [beg, end) so that no data moves; i.e., the
365 403 // destination of region n is simply the start of region n. The argument beg
366 404 // must be region-aligned; end need not be.
367 405 void summarize_dense_prefix(HeapWord* beg, HeapWord* end);
368 406
369 407 HeapWord* summarize_split_space(size_t src_region, SplitInfo& split_info,
370 408 HeapWord* destination, HeapWord* target_end,
371 409 HeapWord** target_next);
372 410 bool summarize(SplitInfo& split_info,
373 411 HeapWord* source_beg, HeapWord* source_end,
374 412 HeapWord** source_next,
375 413 HeapWord* target_beg, HeapWord* target_end,
376 414 HeapWord** target_next);
377 415
378 416 void clear();
379 417 void clear_range(size_t beg_region, size_t end_region);
380 418 void clear_range(HeapWord* beg, HeapWord* end) {
381 419 clear_range(addr_to_region_idx(beg), addr_to_region_idx(end));
382 420 }
383 421
384 422 // Return the number of words between addr and the start of the region
385 423 // containing addr.
386 424 inline size_t region_offset(const HeapWord* addr) const;
387 425
388 426 // Convert addresses to/from a region index or region pointer.
389 427 inline size_t addr_to_region_idx(const HeapWord* addr) const;
390 428 inline RegionData* addr_to_region_ptr(const HeapWord* addr) const;
391 429 inline HeapWord* region_to_addr(size_t region) const;
392 430 inline HeapWord* region_to_addr(size_t region, size_t offset) const;
393 431 inline HeapWord* region_to_addr(const RegionData* region) const;
394 432
395 433 inline HeapWord* region_align_down(HeapWord* addr) const;
396 434 inline HeapWord* region_align_up(HeapWord* addr) const;
397 435 inline bool is_region_aligned(HeapWord* addr) const;
398 436
437 + // Analogous to region_offset() for blocks.
438 + size_t block_offset(const HeapWord* addr) const;
439 + size_t addr_to_block_idx(const HeapWord* addr) const;
440 + size_t addr_to_block_idx(const oop obj) const {
441 + return addr_to_block_idx((HeapWord*) obj);
442 + }
443 + inline BlockData* addr_to_block_ptr(const HeapWord* addr) const;
444 + inline HeapWord* block_to_addr(size_t block) const;
445 + inline size_t region_to_block_idx(size_t region) const;
446 +
447 + inline HeapWord* block_align_down(HeapWord* addr) const;
448 + inline HeapWord* block_align_up(HeapWord* addr) const;
449 + inline bool is_block_aligned(HeapWord* addr) const;
450 +
399 451 // Return the address one past the end of the partial object.
400 452 HeapWord* partial_obj_end(size_t region_idx) const;
401 453
402 - // Return the new location of the object p after the
403 - // the compaction.
454 + // Return the location of the object after compaction.
404 455 HeapWord* calc_new_pointer(HeapWord* addr);
405 456
406 457 HeapWord* calc_new_pointer(oop p) {
407 458 return calc_new_pointer((HeapWord*) p);
408 459 }
409 460
410 461 // Return the updated address for the given klass
411 462 klassOop calc_new_klass(klassOop);
412 463
413 464 #ifdef ASSERT
414 465 void verify_clear(const PSVirtualSpace* vspace);
415 466 void verify_clear();
416 467 #endif // #ifdef ASSERT
417 468
418 469 private:
470 + bool initialize_block_data();
419 471 bool initialize_region_data(size_t region_size);
420 472 PSVirtualSpace* create_vspace(size_t count, size_t element_size);
421 473
422 474 private:
423 475 HeapWord* _region_start;
424 476 #ifdef ASSERT
425 477 HeapWord* _region_end;
426 478 #endif // #ifdef ASSERT
427 479
428 480 PSVirtualSpace* _region_vspace;
429 481 RegionData* _region_data;
430 482 size_t _region_count;
483 +
484 + PSVirtualSpace* _block_vspace;
485 + BlockData* _block_data;
486 + size_t _block_count;
431 487 };
432 488
433 489 inline uint
434 490 ParallelCompactData::RegionData::destination_count_raw() const
435 491 {
436 492 return _dc_and_los & dc_mask;
437 493 }
438 494
439 495 inline uint
440 496 ParallelCompactData::RegionData::destination_count() const
441 497 {
442 498 return destination_count_raw() >> dc_shift;
443 499 }
444 500
501 +inline bool
502 +ParallelCompactData::RegionData::blocks_filled() const
503 +{
504 + return _blocks_filled;
505 +}
506 +
507 +#ifdef ASSERT
508 +inline size_t
509 +ParallelCompactData::RegionData::blocks_filled_count() const
510 +{
511 + return _blocks_filled_count;
512 +}
513 +#endif // #ifdef ASSERT
514 +
445 515 inline void
516 +ParallelCompactData::RegionData::set_blocks_filled()
517 +{
518 + _blocks_filled = true;
519 + // Debug builds count the number of times the table was filled.
520 + DEBUG_ONLY(Atomic::inc_ptr(&_blocks_filled_count));
521 +}
522 +
523 +inline void
446 524 ParallelCompactData::RegionData::set_destination_count(uint count)
447 525 {
448 526 assert(count <= (dc_completed >> dc_shift), "count too large");
449 527 const region_sz_t live_sz = (region_sz_t) live_obj_size();
450 528 _dc_and_los = (count << dc_shift) | live_sz;
451 529 }
452 530
453 531 inline void ParallelCompactData::RegionData::set_live_obj_size(size_t words)
454 532 {
455 533 assert(words <= los_mask, "would overflow");
456 534 _dc_and_los = destination_count_raw() | (region_sz_t)words;
457 535 }
458 536
459 537 inline void ParallelCompactData::RegionData::decrement_destination_count()
460 538 {
461 539 assert(_dc_and_los < dc_claimed, "already claimed");
462 540 assert(_dc_and_los >= dc_one, "count would go negative");
463 541 Atomic::add((int)dc_mask, (volatile int*)&_dc_and_los);
464 542 }
465 543
466 544 inline HeapWord* ParallelCompactData::RegionData::data_location() const
467 545 {
468 546 DEBUG_ONLY(return _data_location;)
469 547 NOT_DEBUG(return NULL;)
470 548 }
471 549
472 550 inline HeapWord* ParallelCompactData::RegionData::highest_ref() const
473 551 {
474 552 DEBUG_ONLY(return _highest_ref;)
475 553 NOT_DEBUG(return NULL;)
476 554 }
477 555
478 556 inline void ParallelCompactData::RegionData::set_data_location(HeapWord* addr)
479 557 {
480 558 DEBUG_ONLY(_data_location = addr;)
481 559 }
482 560
483 561 inline void ParallelCompactData::RegionData::set_completed()
484 562 {
485 563 assert(claimed(), "must be claimed first");
486 564 _dc_and_los = dc_completed | (region_sz_t) live_obj_size();
487 565 }
488 566
489 567 // MT-unsafe claiming of a region. Should only be used during single-threaded
490 568 // execution.
491 569 inline bool ParallelCompactData::RegionData::claim_unsafe()
492 570 {
493 571 if (available()) {
494 572 _dc_and_los |= dc_claimed;
495 573 return true;
496 574 }
497 575 return false;
498 576 }
499 577
500 578 inline void ParallelCompactData::RegionData::add_live_obj(size_t words)
501 579 {
502 580 assert(words <= (size_t)los_mask - live_obj_size(), "overflow");
503 581 Atomic::add((int) words, (volatile int*) &_dc_and_los);
504 582 }
505 583
506 584 inline void ParallelCompactData::RegionData::set_highest_ref(HeapWord* addr)
507 585 {
508 586 #ifdef ASSERT
509 587 HeapWord* tmp = _highest_ref;
510 588 while (addr > tmp) {
511 589 tmp = (HeapWord*)Atomic::cmpxchg_ptr(addr, &_highest_ref, tmp);
512 590 }
513 591 #endif // #ifdef ASSERT
514 592 }
515 593
516 594 inline bool ParallelCompactData::RegionData::claim()
517 595 {
518 596 const int los = (int) live_obj_size();
519 597 const int old = Atomic::cmpxchg(dc_claimed | los,
520 598 (volatile int*) &_dc_and_los, los);
521 599 return old == los;
522 600 }
523 601
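A hedged lifecycle sketch tying claim() to the destination_count protocol documented above (illustrative; the real draining loop lives in the compaction task code):

    // A worker claims a region whose destination_count has reached zero,
    // fills it, and filling ends by marking the region completed.
    ParallelCompactData::RegionData* const rd = sd.region(region_idx);
    if (rd->claim()) {
      PSParallelCompact::fill_and_update_region(cm, region_idx);
    }
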
524 602 inline ParallelCompactData::RegionData*
525 603 ParallelCompactData::region(size_t region_idx) const
526 604 {
527 605 assert(region_idx <= region_count(), "bad arg");
528 606 return _region_data + region_idx;
529 607 }
530 608
531 609 inline size_t
532 610 ParallelCompactData::region(const RegionData* const region_ptr) const
533 611 {
534 612 assert(region_ptr >= _region_data, "bad arg");
535 613 assert(region_ptr <= _region_data + region_count(), "bad arg");
536 614 return pointer_delta(region_ptr, _region_data, sizeof(RegionData));
537 615 }
538 616
617 +inline ParallelCompactData::BlockData*
618 +ParallelCompactData::block(size_t n) const {
619 + assert(n < block_count(), "bad arg");
620 + return _block_data + n;
621 +}
622 +
539 623 inline size_t
540 624 ParallelCompactData::region_offset(const HeapWord* addr) const
541 625 {
542 626 assert(addr >= _region_start, "bad addr");
543 627 assert(addr <= _region_end, "bad addr");
544 628 return (size_t(addr) & RegionAddrOffsetMask) >> LogHeapWordSize;
545 629 }
546 630
547 631 inline size_t
548 632 ParallelCompactData::addr_to_region_idx(const HeapWord* addr) const
549 633 {
550 634 assert(addr >= _region_start, "bad addr");
551 635 assert(addr <= _region_end, "bad addr");
552 636 return pointer_delta(addr, _region_start) >> Log2RegionSize;
553 637 }
554 638
555 639 inline ParallelCompactData::RegionData*
556 640 ParallelCompactData::addr_to_region_ptr(const HeapWord* addr) const
557 641 {
558 642 return region(addr_to_region_idx(addr));
559 643 }
560 644
561 645 inline HeapWord*
562 646 ParallelCompactData::region_to_addr(size_t region) const
563 647 {
564 648 assert(region <= _region_count, "region out of range");
565 649 return _region_start + (region << Log2RegionSize);
566 650 }
567 651
568 652 inline HeapWord*
569 653 ParallelCompactData::region_to_addr(const RegionData* region) const
570 654 {
571 655 return region_to_addr(pointer_delta(region, _region_data,
572 656 sizeof(RegionData)));
573 657 }
574 658
575 659 inline HeapWord*
576 660 ParallelCompactData::region_to_addr(size_t region, size_t offset) const
577 661 {
578 662 assert(region <= _region_count, "region out of range");
579 663 assert(offset < RegionSize, "offset too big"); // This may be too strict.
580 664 return region_to_addr(region) + offset;
581 665 }
582 666
583 667 inline HeapWord*
584 668 ParallelCompactData::region_align_down(HeapWord* addr) const
585 669 {
586 670 assert(addr >= _region_start, "bad addr");
587 671 assert(addr < _region_end + RegionSize, "bad addr");
588 672 return (HeapWord*)(size_t(addr) & RegionAddrMask);
589 673 }
590 674
591 675 inline HeapWord*
592 676 ParallelCompactData::region_align_up(HeapWord* addr) const
593 677 {
594 678 assert(addr >= _region_start, "bad addr");
595 679 assert(addr <= _region_end, "bad addr");
596 680 return region_align_down(addr + RegionSizeOffsetMask);
597 681 }
598 682
599 683 inline bool
600 684 ParallelCompactData::is_region_aligned(HeapWord* addr) const
601 685 {
602 686 return region_offset(addr) == 0;
603 687 }
604 688
689 +inline size_t
690 +ParallelCompactData::block_offset(const HeapWord* addr) const
691 +{
692 + assert(addr >= _region_start, "bad addr");
693 + assert(addr <= _region_end, "bad addr");
694 + return (size_t(addr) & BlockAddrOffsetMask) >> LogHeapWordSize;
695 +}
696 +
697 +inline size_t
698 +ParallelCompactData::addr_to_block_idx(const HeapWord* addr) const
699 +{
700 + assert(addr >= _region_start, "bad addr");
701 + assert(addr <= _region_end, "bad addr");
702 + return pointer_delta(addr, _region_start) >> Log2BlockSize;
703 +}
704 +
705 +inline ParallelCompactData::BlockData*
706 +ParallelCompactData::addr_to_block_ptr(const HeapWord* addr) const
707 +{
708 + return block(addr_to_block_idx(addr));
709 +}
710 +
711 +inline HeapWord*
712 +ParallelCompactData::block_to_addr(size_t block) const
713 +{
714 + assert(block < _block_count, "block out of range");
715 + return _region_start + (block << Log2BlockSize);
716 +}
717 +
718 +inline size_t
719 +ParallelCompactData::region_to_block_idx(size_t region) const
720 +{
721 + return region << Log2BlocksPerRegion;
722 +}
723 +
724 +inline HeapWord*
725 +ParallelCompactData::block_align_down(HeapWord* addr) const
726 +{
727 + assert(addr >= _region_start, "bad addr");
728 + assert(addr < _region_end + RegionSize, "bad addr");
729 + return (HeapWord*)(size_t(addr) & BlockAddrMask);
730 +}
731 +
732 +inline HeapWord*
733 +ParallelCompactData::block_align_up(HeapWord* addr) const
734 +{
735 + assert(addr >= _region_start, "bad addr");
736 + assert(addr <= _region_end, "bad addr");
737 + return block_align_down(addr + BlockSizeOffsetMask);
738 +}
739 +
740 +inline bool
741 +ParallelCompactData::is_block_aligned(HeapWord* addr) const
742 +{
743 + return block_offset(addr) == 0;
744 +}
745 +
605 746 // Abstract closure for use with ParMarkBitMap::iterate(), which will invoke the
606 747 // do_addr() method.
607 748 //
608 749 // The closure is initialized with the number of heap words to process
609 750 // (words_remaining()), and becomes 'full' when it reaches 0. The do_addr()
610 751 // methods in subclasses should update the total as words are processed. Since
611 752 // only one subclass actually uses this mechanism to terminate iteration, the
612 753 // default initial value is > 0. The implementation is here and not in the
613 754 // single subclass that uses it to avoid making is_full() virtual, and thus
614 755 // adding a virtual call per live object.
615 756
616 757 class ParMarkBitMapClosure: public StackObj {
617 758 public:
618 759 typedef ParMarkBitMap::idx_t idx_t;
619 760 typedef ParMarkBitMap::IterationStatus IterationStatus;
620 761
621 762 public:
622 763 inline ParMarkBitMapClosure(ParMarkBitMap* mbm, ParCompactionManager* cm,
623 764 size_t words = max_uintx);
624 765
625 766 inline ParCompactionManager* compaction_manager() const;
626 767 inline ParMarkBitMap* bitmap() const;
627 768 inline size_t words_remaining() const;
628 769 inline bool is_full() const;
629 770 inline HeapWord* source() const;
630 771
631 772 inline void set_source(HeapWord* addr);
632 773
633 774 virtual IterationStatus do_addr(HeapWord* addr, size_t words) = 0;
634 775
635 776 protected:
636 777 inline void decrement_words_remaining(size_t words);
637 778
638 779 private:
639 780 ParMarkBitMap* const _bitmap;
640 781 ParCompactionManager* const _compaction_manager;
641 782 DEBUG_ONLY(const size_t _initial_words_remaining;) // Useful in debugger.
642 783 size_t _words_remaining; // Words left to copy.
643 784
644 785 protected:
645 786 HeapWord* _source; // Next addr that would be read.
646 787 };
647 788
648 789 inline
649 790 ParMarkBitMapClosure::ParMarkBitMapClosure(ParMarkBitMap* bitmap,
650 791 ParCompactionManager* cm,
651 792 size_t words):
652 793 _bitmap(bitmap), _compaction_manager(cm)
653 794 #ifdef ASSERT
654 795 , _initial_words_remaining(words)
655 796 #endif
656 797 {
657 798 _words_remaining = words;
658 799 _source = NULL;
659 800 }
660 801
661 802 inline ParCompactionManager* ParMarkBitMapClosure::compaction_manager() const {
662 803 return _compaction_manager;
663 804 }
664 805
665 806 inline ParMarkBitMap* ParMarkBitMapClosure::bitmap() const {
666 807 return _bitmap;
667 808 }
668 809
669 810 inline size_t ParMarkBitMapClosure::words_remaining() const {
670 811 return _words_remaining;
671 812 }
672 813
673 814 inline bool ParMarkBitMapClosure::is_full() const {
674 815 return words_remaining() == 0;
675 816 }
676 817
677 818 inline HeapWord* ParMarkBitMapClosure::source() const {
678 819 return _source;
679 820 }
680 821
681 822 inline void ParMarkBitMapClosure::set_source(HeapWord* addr) {
682 823 _source = addr;
683 824 }
684 825
685 826 inline void ParMarkBitMapClosure::decrement_words_remaining(size_t words) {
686 827 assert(_words_remaining >= words, "processed too many words");
687 828 _words_remaining -= words;
688 829 }
689 830
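A minimal subclass sketch of the closure protocol above (hypothetical closure, for illustration only): do_addr() consumes the word budget and reports full when it reaches zero.

    class CountLiveWordsClosure: public ParMarkBitMapClosure {
     public:
      CountLiveWordsClosure(ParMarkBitMap* bm, ParCompactionManager* cm,
                            size_t words):
        ParMarkBitMapClosure(bm, cm, words) { }

      virtual IterationStatus do_addr(HeapWord* addr, size_t words) {
        decrement_words_remaining(words);
        return is_full() ? ParMarkBitMap::full : ParMarkBitMap::incomplete;
      }
    };
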
690 831 // The UseParallelOldGC collector is a stop-the-world garbage collector that
691 832 // does parts of the collection using parallel threads. The collection includes
692 833 // the tenured generation and the young generation. The permanent generation is
693 834 // collected at the same time as the other two generations but the permanent
694 835 // generation is collect by a single GC thread. The permanent generation is
695 836 // generation is collected by a single GC thread. The permanent generation is
696 837 // klass AAA, any objects reference by AAA must already have been processed.
697 838 // klass AAA, any objects referenced by AAA must already have been processed.
698 839 // address) sliding compaction.
699 840 //
700 841 // There are four phases of the collection.
701 842 //
702 843 // - marking phase
703 844 // - summary phase
704 845 // - compacting phase
705 846 // - clean up phase
706 847 //
707 848 // Roughly speaking these phases correspond, respectively, to
708 849 // - mark all the live objects
709 850 // - calculate the destination of each object at the end of the collection
710 851 // - move the objects to their destination
711 852 // - update some references and reinitialize some variables
712 853 //
713 854 // These phases are invoked in PSParallelCompact::invoke_no_policy(). The
714 855 // marking phase is implemented in PSParallelCompact::marking_phase() and does a
715 856 // complete marking of the heap. The summary phase is implemented in
716 857 // PSParallelCompact::summary_phase(). The move and update phase is implemented
717 858 // in PSParallelCompact::compact().
718 859 //
719 860 // A space that is being collected is divided into regions; each region is
720 861 // described by an object of type ParallelCompactData::RegionData. Each region
721 862 // is of a fixed size, typically contains more than one object, and may have
722 863 // parts of objects at its front and back.
723 864 //
724 865 // region -----+---------------------+----------
725 866 // objects covered [ AAA )[ BBB )[ CCC )[ DDD )
726 867 //
727 868 // The marking phase does a complete marking of all live objects in the heap.
728 869 // The marking also computes the size of the data for all live objects covered
729 870 // by the region. This size includes the part of any live object spanning onto
730 871 // the region (part of AAA if it is live) from the front, all live objects
731 872 // contained in the region (BBB and/or CCC if they are live), and the part of
732 873 // any live objects covered by the region that extends off the region (part of
733 874 // DDD if it is live). The marking phase uses multiple GC threads and marking
734 875 // is done in a bit array of type ParMarkBitMap. The marking of the bit map is
735 876 // done atomically as is the accumulation of the size of the live objects
736 877 // covered by a region.
737 878 //
738 879 // The summary phase calculates the total live data to the left of each region
739 880 // XXX. Based on that total and the bottom of the space, it can calculate the
740 881 // starting location of the live data in XXX. The summary phase calculates for
741 882 // each region XXX quantities such as
742 883 //
743 884 // - the amount of live data at the beginning of a region from an object
744 885 // entering the region.
745 886 // - the location of the first live data on the region
746 887 // - a count of the number of regions receiving live data from XXX.
747 888 //
748 889 // See ParallelCompactData for precise details. The summary phase also
749 890 // calculates the dense prefix for the compaction. The dense prefix is a
750 891 // portion at the beginning of the space that is not moved. The objects in the
751 892 // dense prefix do need to have their object references updated. See method
752 893 // summarize_dense_prefix().
753 894 //
754 895 // The summary phase is done using 1 GC thread.
755 896 //
756 897 // The compaction phase moves objects to their new location and updates all
757 898 // references in the object.
758 899 //
759 900 // A current exception is that objects that cross a region boundary are moved
760 901 // but do not have their references updated. References are not updated because
761 902 // it cannot easily be determined if the klass pointer KKK for the object AAA
762 903 // has been updated. KKK likely resides in a region to the left of the region
763 904 // containing AAA. These AAA's have their references updated at the end in a
764 905 // clean up phase. See the method PSParallelCompact::update_deferred_objects().
765 906 // An alternate strategy is being investigated for this deferral of updating.
766 907 //
767 908 // Compaction is done on a region basis. A region that is ready to be filled is
768 909 // put on a ready list and GC threads take regions off the list and fill them. A
769 910 // region is ready to be filled if it is empty of live objects. Such a region may
770 911 // have been initially empty (only contained dead objects) or may have had all
771 912 // its live objects copied out already. A region that compacts into itself is
772 913 // also ready for filling. The ready list is initially filled with empty
773 914 // regions and regions compacting into themselves. There is always at least 1
774 915 // region that can be put on the ready list. The regions are atomically added
775 916 // and removed from the ready list.
776 917
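In outline, the phases correspond to calls like the following in invoke_no_policy() (a simplified sketch; the real method also handles policy checks, timers, and adaptive sizing, and cm stands in for the VM thread's ParCompactionManager):

    marking_phase(cm, maximum_heap_compaction);  // mark live objects
    summary_phase(cm, maximum_heap_compaction);  // compute destinations
    adjust_roots();                              // update references in roots
    compact_perm(cm);                            // serial perm gen compaction
    compact();                                   // move objects, update references
    post_compact();                              // clean up phase
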
777 918 class PSParallelCompact : AllStatic {
778 919 public:
779 920 // Convenient access to type names.
780 921 typedef ParMarkBitMap::idx_t idx_t;
781 922 typedef ParallelCompactData::RegionData RegionData;
923 + typedef ParallelCompactData::BlockData BlockData;
782 924
783 925 typedef enum {
784 926 perm_space_id, old_space_id, eden_space_id,
785 927 from_space_id, to_space_id, last_space_id
786 928 } SpaceId;
787 929
788 930 public:
789 931 // Inline closure decls
790 932 //
791 933 class IsAliveClosure: public BoolObjectClosure {
792 934 public:
793 935 virtual void do_object(oop p);
794 936 virtual bool do_object_b(oop p);
795 937 };
796 938
797 939 class KeepAliveClosure: public OopClosure {
798 940 private:
799 941 ParCompactionManager* _compaction_manager;
800 942 protected:
801 943 template <class T> inline void do_oop_work(T* p);
802 944 public:
803 945 KeepAliveClosure(ParCompactionManager* cm) : _compaction_manager(cm) { }
804 946 virtual void do_oop(oop* p);
805 947 virtual void do_oop(narrowOop* p);
806 948 };
807 949
808 950 // Currently unused
809 951 class FollowRootClosure: public OopsInGenClosure {
810 952 private:
811 953 ParCompactionManager* _compaction_manager;
812 954 public:
813 955 FollowRootClosure(ParCompactionManager* cm) : _compaction_manager(cm) { }
814 956 virtual void do_oop(oop* p);
815 957 virtual void do_oop(narrowOop* p);
816 958 };
817 959
818 960 class FollowStackClosure: public VoidClosure {
819 961 private:
820 962 ParCompactionManager* _compaction_manager;
821 963 public:
822 964 FollowStackClosure(ParCompactionManager* cm) : _compaction_manager(cm) { }
823 965 virtual void do_void();
824 966 };
825 967
826 968 class AdjustPointerClosure: public OopsInGenClosure {
827 969 private:
828 970 bool _is_root;
829 971 public:
830 972 AdjustPointerClosure(bool is_root) : _is_root(is_root) { }
831 973 virtual void do_oop(oop* p);
832 974 virtual void do_oop(narrowOop* p);
833 975 // do not walk from thread stacks to the code cache during this phase
834 976 virtual void do_code_blob(CodeBlob* cb) const { }
835 977 };
836 978
837 979 friend class KeepAliveClosure;
838 980 friend class FollowStackClosure;
839 981 friend class AdjustPointerClosure;
840 982 friend class FollowRootClosure;
841 983 friend class instanceKlassKlass;
842 984 friend class RefProcTaskProxy;
843 985
844 986 private:
845 987 static STWGCTimer _gc_timer;
846 988 static ParallelOldTracer _gc_tracer;
847 989 static elapsedTimer _accumulated_time;
848 990 static unsigned int _total_invocations;
849 991 static unsigned int _maximum_compaction_gc_num;
850 992 static jlong _time_of_last_gc; // ms
851 993 static CollectorCounters* _counters;
852 994 static ParMarkBitMap _mark_bitmap;
853 995 static ParallelCompactData _summary_data;
854 996 static IsAliveClosure _is_alive_closure;
855 997 static SpaceInfo _space_info[last_space_id];
856 998 static bool _print_phases;
857 999 static AdjustPointerClosure _adjust_root_pointer_closure;
858 1000 static AdjustPointerClosure _adjust_pointer_closure;
859 1001
860 1002 // Reference processing (used in ...follow_contents)
861 1003 static ReferenceProcessor* _ref_processor;
862 1004
863 1005 // Updated location of intArrayKlassObj.
864 1006 static klassOop _updated_int_array_klass_obj;
865 1007
866 1008 // Values computed at initialization and used by dead_wood_limiter().
867 1009 static double _dwl_mean;
868 1010 static double _dwl_std_dev;
869 1011 static double _dwl_first_term;
870 1012 static double _dwl_adjustment;
871 1013 #ifdef ASSERT
872 1014 static bool _dwl_initialized;
873 1015 #endif // #ifdef ASSERT
874 1016
875 1017 private:
876 1018 // Closure accessors
877 1019 static OopClosure* adjust_pointer_closure() { return (OopClosure*)&_adjust_pointer_closure; }
878 1020 static OopClosure* adjust_root_pointer_closure() { return (OopClosure*)&_adjust_root_pointer_closure; }
879 1021 static BoolObjectClosure* is_alive_closure() { return (BoolObjectClosure*)&_is_alive_closure; }
880 1022
881 1023 static void initialize_space_info();
882 1024
883 1025 // Return true if details about individual phases should be printed.
884 1026 static inline bool print_phases();
885 1027
886 1028 // Clear the marking bitmap and summary data that cover the specified space.
887 1029 static void clear_data_covering_space(SpaceId id);
888 1030
889 1031 static void pre_compact(PreGCValues* pre_gc_values);
890 1032 static void post_compact();
891 1033
892 1034 // Mark live objects
893 1035 static void marking_phase(ParCompactionManager* cm, bool maximum_heap_compaction);
894 1036 static void follow_weak_klass_links();
895 1037 static void follow_mdo_weak_refs();
896 1038
897 1039 template <class T> static inline void adjust_pointer(T* p, bool is_root);
898 1040 static void adjust_root_pointer(oop* p) { adjust_pointer(p, true); }
899 1041
900 1042 template <class T>
901 1043 static inline void follow_root(ParCompactionManager* cm, T* p);
902 1044
903 1045 // Compute the dense prefix for the designated space. This is an experimental
904 1046 // implementation currently not used in production.
905 1047 static HeapWord* compute_dense_prefix_via_density(const SpaceId id,
906 1048 bool maximum_compaction);
907 1049
908 1050 // Methods used to compute the dense prefix.
909 1051
910 1052 // Compute the value of the normal distribution at x = density. The mean and
911 1053 // standard deviation are values saved by initialize_dead_wood_limiter().
912 1054 static inline double normal_distribution(double density);
913 1055
914 1056 // Initialize the static vars used by dead_wood_limiter().
915 1057 static void initialize_dead_wood_limiter();
916 1058
917 1059 // Return the percentage of space that can be treated as "dead wood" (i.e.,
918 1060 // not reclaimed).
919 1061 static double dead_wood_limiter(double density, size_t min_percent);
920 1062
921 1063 // Find the first (left-most) region in the range [beg, end) that has at least
922 1064 // dead_words of dead space to the left. The argument beg must be the first
923 1065 // region in the space that is not completely live.
924 1066 static RegionData* dead_wood_limit_region(const RegionData* beg,
925 1067 const RegionData* end,
926 1068 size_t dead_words);
927 1069
928 1070 // Return a pointer to the first region in the range [beg, end) that is not
929 1071 // completely full.
930 1072 static RegionData* first_dead_space_region(const RegionData* beg,
931 1073 const RegionData* end);
932 1074
933 1075 // Return a value indicating the benefit or 'yield' if the compacted region
934 1076 // were to start (or equivalently if the dense prefix were to end) at the
935 1077 // candidate region. Higher values are better.
936 1078 //
937 1079 // The value is based on the amount of space reclaimed vs. the costs of (a)
938 1080 // updating references in the dense prefix plus (b) copying objects and
939 1081 // updating references in the compacted region.
940 1082 static inline double reclaimed_ratio(const RegionData* const candidate,
941 1083 HeapWord* const bottom,
942 1084 HeapWord* const top,
943 1085 HeapWord* const new_top);
944 1086
945 1087 // Compute the dense prefix for the designated space.
946 1088 static HeapWord* compute_dense_prefix(const SpaceId id,
947 1089 bool maximum_compaction);
948 1090
949 1091 // Return true if dead space crosses onto the specified Region; bit must be
950 1092 // the bit index corresponding to the first word of the Region.
951 1093 static inline bool dead_space_crosses_boundary(const RegionData* region,
952 1094 idx_t bit);
953 1095
954 1096 // Summary phase utility routine to fill dead space (if any) at the dense
955 1097 // prefix boundary. Should only be called if the dense prefix is
956 1098 // non-empty.
957 1099 static void fill_dense_prefix_end(SpaceId id);
958 1100
959 1101 // Clear the summary data source_region field for the specified addresses.
960 1102 static void clear_source_region(HeapWord* beg_addr, HeapWord* end_addr);
961 1103
962 1104 #ifndef PRODUCT
963 1105 // Routines to provoke splitting a young gen space (ParallelOldGCSplitALot).
964 1106
965 1107 // Fill the region [start, start + words) with live object(s). Only usable
966 1108 // for the old and permanent generations.
967 1109 static void fill_with_live_objects(SpaceId id, HeapWord* const start,
968 1110 size_t words);
969 1111 // Include the new objects in the summary data.
970 1112 static void summarize_new_objects(SpaceId id, HeapWord* start);
971 1113
972 1114 // Add live objects to a survivor space since it's rare that both survivors
973 1115 // are non-empty.
974 1116 static void provoke_split_fill_survivor(SpaceId id);
975 1117
976 1118 // Add live objects and/or choose the dense prefix to provoke splitting.
977 1119 static void provoke_split(bool & maximum_compaction);
978 1120 #endif
979 1121
980 1122 static void summarize_spaces_quick();
981 1123 static void summarize_space(SpaceId id, bool maximum_compaction);
982 1124 static void summary_phase(ParCompactionManager* cm, bool maximum_compaction);
983 1125
984 1126 // Adjust addresses in roots. Does not adjust addresses in heap.
985 1127 static void adjust_roots();
986 1128
987 1129 // Serial code executed in preparation for the compaction phase.
988 1130 static void compact_prologue();
989 1131
1132 + DEBUG_ONLY(static void write_block_fill_histogram(outputStream* const out);)
1133 +
990 1134 // Move objects to new locations.
991 1135 static void compact_perm(ParCompactionManager* cm);
992 1136 static void compact();
993 1137
994 1138 // Add available regions to the stack and draining tasks to the task queue.
995 1139 static void enqueue_region_draining_tasks(GCTaskQueue* q,
996 1140 uint parallel_gc_threads);
997 1141
998 1142 // Add dense prefix update tasks to the task queue.
999 1143 static void enqueue_dense_prefix_tasks(GCTaskQueue* q,
1000 1144 uint parallel_gc_threads);
1001 1145
1002 1146 // Add region stealing tasks to the task queue.
1003 1147 static void enqueue_region_stealing_tasks(
1004 1148 GCTaskQueue* q,
1005 1149 ParallelTaskTerminator* terminator_ptr,
1006 1150 uint parallel_gc_threads);
1007 1151
1008 1152 // If objects are left in eden after a collection, try to move the boundary
1009 1153 // and absorb them into the old gen. Returns true if eden was emptied.
1010 1154 static bool absorb_live_data_from_eden(PSAdaptiveSizePolicy* size_policy,
1011 1155 PSYoungGen* young_gen,
1012 1156 PSOldGen* old_gen);
1013 1157
1014 1158 // Reset time since last full gc
1015 1159 static void reset_millis_since_last_gc();
1016 1160
1017 1161 protected:
1018 1162 #ifdef VALIDATE_MARK_SWEEP
1019 1163 static GrowableArray<void*>* _root_refs_stack;
1020 1164 static GrowableArray<oop> * _live_oops;
1021 1165 static GrowableArray<oop> * _live_oops_moved_to;
1022 1166 static GrowableArray<size_t>* _live_oops_size;
1023 1167 static size_t _live_oops_index;
1024 1168 static size_t _live_oops_index_at_perm;
1025 1169 static GrowableArray<void*>* _other_refs_stack;
1026 1170 static GrowableArray<void*>* _adjusted_pointers;
1027 1171 static bool _pointer_tracking;
1028 1172 static bool _root_tracking;
1029 1173
1030 1174 // The following arrays are saved since the time of the last GC and
1031 1175 // assist in tracking down problems where someone has done an errant
1032 1176 // store into the heap, usually to an oop that wasn't properly
1033 1177 // handleized across a GC. If we crash or otherwise fail before the
1034 1178 // next GC, we can query these arrays to find out the object we had
1035 1179 // intended to do the store to (assuming it is still alive) and the
1036 1180 // offset within that object. Covered under RecordMarkSweepCompaction.
1037 1181 static GrowableArray<HeapWord*> * _cur_gc_live_oops;
1038 1182 static GrowableArray<HeapWord*> * _cur_gc_live_oops_moved_to;
1039 1183 static GrowableArray<size_t>* _cur_gc_live_oops_size;
1040 1184 static GrowableArray<HeapWord*> * _last_gc_live_oops;
1041 1185 static GrowableArray<HeapWord*> * _last_gc_live_oops_moved_to;
1042 1186 static GrowableArray<size_t>* _last_gc_live_oops_size;
1043 1187 #endif
1044 1188
1045 1189 public:
1046 1190 class MarkAndPushClosure: public OopClosure {
1047 1191 private:
1048 1192 ParCompactionManager* _compaction_manager;
1049 1193 public:
1050 1194 MarkAndPushClosure(ParCompactionManager* cm) : _compaction_manager(cm) { }
1051 1195 virtual void do_oop(oop* p);
1052 1196 virtual void do_oop(narrowOop* p);
1053 1197 };
1054 1198
1055 1199 PSParallelCompact();
1056 1200
1057 1201 // Convenient accessor for Universe::heap().
1058 1202 static ParallelScavengeHeap* gc_heap() {
1059 1203 return (ParallelScavengeHeap*)Universe::heap();
1060 1204 }
1061 1205
1062 1206 static void invoke(bool maximum_heap_compaction);
1063 1207 static bool invoke_no_policy(bool maximum_heap_compaction);
1064 1208
1065 1209 static void post_initialize();
1066 1210 // Perform initialization for PSParallelCompact that requires
1067 1211 // allocations. This should be called during the VM initialization
1068 1212 // at a pointer where it would be appropriate to return a JNI_ENOMEM
1069 1213 // in the event of a failure.
1070 1214 static bool initialize();
1071 1215
1072 1216 // Public accessors
1073 1217 static elapsedTimer* accumulated_time() { return &_accumulated_time; }
1074 1218 static unsigned int total_invocations() { return _total_invocations; }
1075 1219 static CollectorCounters* counters() { return _counters; }
1076 1220
1077 1221 // Used to add tasks
1078 1222 static GCTaskManager* const gc_task_manager();
1079 1223 static klassOop updated_int_array_klass_obj() {
1080 1224 return _updated_int_array_klass_obj;
1081 1225 }
1082 1226
1083 1227 // Marking support
1084 1228 static inline bool mark_obj(oop obj);
1085 1229 // Check mark and maybe push on marking stack
1086 1230 template <class T> static inline void mark_and_push(ParCompactionManager* cm,
1087 1231 T* p);
1088 1232
1089 1233 // Compaction support.
1090 1234 // Return true if p is in the range [beg_addr, end_addr).
1091 1235 static inline bool is_in(HeapWord* p, HeapWord* beg_addr, HeapWord* end_addr);
1092 1236 static inline bool is_in(oop* p, HeapWord* beg_addr, HeapWord* end_addr);
1093 1237
1094 1238 // Convenience wrappers for per-space data kept in _space_info.
1095 1239 static inline MutableSpace* space(SpaceId space_id);
1096 1240 static inline HeapWord* new_top(SpaceId space_id);
1097 1241 static inline HeapWord* dense_prefix(SpaceId space_id);
1098 1242 static inline ObjectStartArray* start_array(SpaceId space_id);
1099 1243
1100 1244 // Return true if the klass should be updated.
1101 1245 static inline bool should_update_klass(klassOop k);
1102 1246
1103 1247 // Move and update the live objects in the specified space.
1104 1248 static void move_and_update(ParCompactionManager* cm, SpaceId space_id);
1105 1249
1106 1250 // Process the end of the given region range in the dense prefix.
1107 1251 // This includes saving any object not updated.
1108 1252 static void dense_prefix_regions_epilogue(ParCompactionManager* cm,
1109 1253 size_t region_start_index,
1110 1254 size_t region_end_index,
1111 1255 idx_t exiting_object_offset,
1112 1256 idx_t region_offset_start,
1113 1257 idx_t region_offset_end);
1114 1258
1115 1259 // Update a region in the dense prefix. For each live object
1116 1260 // in the region, update its interior references. For each
1117 1261 // dead object, fill it with deadwood. Dead space at the end
1118 1262 // of a region range will be filled to the start of the next
1119 1263 // live object regardless of the region_index_end. None of the
1120 1264 // objects in the dense prefix move and dead space is dead
1121 1265 // (holds only dead objects that don't need any processing), so
1122 1266 // dead space can be filled in any order.
1123 1267 static void update_and_deadwood_in_dense_prefix(ParCompactionManager* cm,
1124 1268 SpaceId space_id,
1125 1269 size_t region_index_start,
1126 1270 size_t region_index_end);
1127 1271
1128 1272 // Return the address of the count + 1st live word in the range [beg, end).
1129 1273 static HeapWord* skip_live_words(HeapWord* beg, HeapWord* end, size_t count);
1130 1274
1131 1275 // Return the address of the word to be copied to dest_addr, which must be
1132 1276 // aligned to a region boundary.
1133 1277 static HeapWord* first_src_addr(HeapWord* const dest_addr,
1134 1278 SpaceId src_space_id,
1135 1279 size_t src_region_idx);
1136 1280
1137 1281 // Determine the next source region, set closure.source() to the start of the
1138 1282 // new region and return the region index. Parameter end_addr is the address one
1139 1283 // beyond the end of source range just processed. If necessary, switch to a
1140 1284 // new source space and set src_space_id (in-out parameter) and src_space_top
1141 1285 // (out parameter) accordingly.
1142 1286 static size_t next_src_region(MoveAndUpdateClosure& closure,
1143 1287 SpaceId& src_space_id,
1144 1288 HeapWord*& src_space_top,
1145 1289 HeapWord* end_addr);
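//
// A sketch of the caller's side (simplified; the real control flow lives in
// the region-filling code):
//
//   size_t src_region_idx = next_src_region(closure, src_space_id,
//                                           src_space_top, end_addr);
//   // closure.source() now points at the start of the new source region;
//   // src_space_id and src_space_top describe the (possibly new) space.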
1146 1290
1147 1291 // Decrement the destination count for each non-empty source region in the
1148 1292 // range [beg_region, region(region_align_up(end_addr))). If the destination
1149 1293 // count for a region goes to 0 and it needs to be filled, enqueue it.
1150 1294 static void decrement_destination_counts(ParCompactionManager* cm,
1151 1295 SpaceId src_space_id,
1152 1296 size_t beg_region,
1153 1297 HeapWord* end_addr);
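//
// Conceptually (a sketch, not the implementation; sd abbreviates
// summary_data(), and the real code claims regions atomically since
// multiple GC threads race here):
//
//   size_t const end_region =
//     sd.addr_to_region_idx(sd.region_align_up(end_addr));
//   for (size_t cur = beg_region; cur < end_region; ++cur) {
//     RegionData* const rd = sd.region(cur);
//     if (rd->data_size() > 0) {          // non-empty source region
//       rd->decrement_destination_count();
//       if (rd->destination_count() == 0) {
//         cm->push_region(cur);           // now fillable; enqueue it
//       }
//     }
//   }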
1154 1298
1155 1299 // Fill a region, copying objects from one or more source regions.
1156 1300 static void fill_region(ParCompactionManager* cm, size_t region_idx);
1157 1301 static void fill_and_update_region(ParCompactionManager* cm, size_t region) {
1158 1302 fill_region(cm, region);
1159 1303 }
1160 1304
1305 + // Fill in the block table for the specified region.
1306 + static void fill_blocks(size_t region_idx);
1307 +
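//
// A sketch of the intent behind the block table, under the assumption that
// each block entry records the number of live words in the region that
// precede its block (sd abbreviates summary_data(); first_block_in,
// end_block_in and live_words_in_block are hypothetical helpers). Once the
// table is filled, a bitmap search for a new object location only needs to
// count live bits from the nearest block boundary rather than from the
// start of the region:
//
//   size_t live = 0;
//   for (size_t blk = first_block_in(region_idx);
//        blk < end_block_in(region_idx); ++blk) {
//     sd.block(blk)->set_offset(live);    // live words preceding blk
//     live += live_words_in_block(blk);   // counted from the mark bitmap
//   }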
1161 1308 // Update the deferred objects in the space.
1162 1309 static void update_deferred_objects(ParCompactionManager* cm, SpaceId id);
1163 1310
1164 1311 static ParMarkBitMap* mark_bitmap() { return &_mark_bitmap; }
1165 1312 static ParallelCompactData& summary_data() { return _summary_data; }
1166 1313
1167 1314 static inline void adjust_pointer(oop* p) { adjust_pointer(p, false); }
1168 1315 static inline void adjust_pointer(narrowOop* p) { adjust_pointer(p, false); }
1169 1316
1170 1317 // Reference Processing
1171 1318 static ReferenceProcessor* const ref_processor() { return _ref_processor; }
1172 1319
1173 1320 static STWGCTimer* gc_timer() { return &_gc_timer; }
1174 1321
1175 1322 // Return the SpaceId for the given address.
1176 1323 static SpaceId space_id(HeapWord* addr);
1177 1324
1178 1325 // Time since last full gc (in milliseconds).
1179 1326 static jlong millis_since_last_gc();
1180 1327
1181 1328 #ifdef VALIDATE_MARK_SWEEP
1182 1329 static void track_adjusted_pointer(void* p, bool isroot);
1183 1330 static void check_adjust_pointer(void* p);
1184 1331 static void track_interior_pointers(oop obj);
1185 1332 static void check_interior_pointers();
1186 1333
1187 1334 static void reset_live_oop_tracking(bool at_perm);
1188 1335 static void register_live_oop(oop p, size_t size);
1189 1336 static void validate_live_oop(oop p, size_t size);
1190 1337 static void live_oop_moved_to(HeapWord* q, size_t size, HeapWord* compaction_top);
1191 1338 static void compaction_complete();
1192 1339
1193 1340 // Query the results of RecordMarkSweepCompaction.
1194 1341 // Finds and prints the current base oop and offset for a word
1195 1342 // within an oop that was live during the last GC. Helpful for
1196 1343 // tracking down heap stomps.
1197 1344 static void print_new_location_of_heap_address(HeapWord* q);
1198 1345 #endif // #ifdef VALIDATE_MARK_SWEEP
1199 1346
1200 1347 // Call backs for class unloading
1201 1348 // Update subklass/sibling/implementor links at end of marking.
1202 1349 static void revisit_weak_klass_link(ParCompactionManager* cm, Klass* k);
1203 1350
1204 1351 // Clear unmarked oops in MDOs at the end of marking.
1205 1352 static void revisit_mdo(ParCompactionManager* cm, DataLayout* p);
1206 1353
1207 1354 #ifndef PRODUCT
1208 1355 // Debugging support.
1209 1356 static const char* space_names[last_space_id];
1210 1357 static void print_region_ranges();
1211 1358 static void print_dense_prefix_stats(const char* const algorithm,
1212 1359 const SpaceId id,
1213 1360 const bool maximum_compaction,
1214 1361 HeapWord* const addr);
1215 1362 static void summary_phase_msg(SpaceId dst_space_id,
1216 1363 HeapWord* dst_beg, HeapWord* dst_end,
1217 1364 SpaceId src_space_id,
1218 1365 HeapWord* src_beg, HeapWord* src_end);
1219 1366 #endif // #ifndef PRODUCT
1220 1367
1221 1368 #ifdef ASSERT
1222 1369 // Sanity check the new location of a word in the heap.
1223 1370 static inline void check_new_location(HeapWord* old_addr, HeapWord* new_addr);
1224 1371 // Verify that all the regions have been emptied.
1225 1372 static void verify_complete(SpaceId space_id);
1226 1373 #endif // #ifdef ASSERT
1227 1374 };
1228 1375
1229 1376 inline bool PSParallelCompact::mark_obj(oop obj) {
1230 1377 const int obj_size = obj->size();
1231 1378 if (mark_bitmap()->mark_obj(obj, obj_size)) {
1232 1379 _summary_data.add_obj(obj, obj_size);
1233 1380 return true;
1234 1381 } else {
1235 1382 return false;
1236 1383 }
1237 1384 }
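// Note that a successful mark does double duty here: the object is marked
// in the bitmap and its size is accumulated into the summary data, so the
// per-region live-word counts consumed by the summary phase are built up
// during marking.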
1238 1385
1239 1386 template <class T>
1240 1387 inline void PSParallelCompact::follow_root(ParCompactionManager* cm, T* p) {
1241 1388 assert(!Universe::heap()->is_in_reserved(p),
1242 1389 "roots shouldn't be things within the heap");
1243 1390 #ifdef VALIDATE_MARK_SWEEP
1244 1391 if (ValidateMarkSweep) {
1245 1392 guarantee(!_root_refs_stack->contains(p), "should only be in here once");
1246 1393 _root_refs_stack->push(p);
1247 1394 }
1248 1395 #endif
1249 1396 T heap_oop = oopDesc::load_heap_oop(p);
1250 1397 if (!oopDesc::is_null(heap_oop)) {
1251 1398 oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
1252 1399 if (mark_bitmap()->is_unmarked(obj)) {
1253 1400 if (mark_obj(obj)) {
1254 1401 obj->follow_contents(cm);
1255 1402 }
1256 1403 }
1257 1404 }
1258 1405 cm->follow_marking_stacks();
1259 1406 }
1260 1407
1261 1408 template <class T>
1262 1409 inline void PSParallelCompact::mark_and_push(ParCompactionManager* cm, T* p) {
1263 1410 T heap_oop = oopDesc::load_heap_oop(p);
1264 1411 if (!oopDesc::is_null(heap_oop)) {
1265 1412 oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
1266 1413 if (mark_bitmap()->is_unmarked(obj) && mark_obj(obj)) {
1267 1414 cm->push(obj);
1268 1415 }
1269 1416 }
1270 1417 }
1271 1418
1272 1419 template <class T>
1273 1420 inline void PSParallelCompact::adjust_pointer(T* p, bool isroot) {
1274 1421 T heap_oop = oopDesc::load_heap_oop(p);
1275 1422 if (!oopDesc::is_null(heap_oop)) {
1276 1423 oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
1277 1424 oop new_obj = (oop)summary_data().calc_new_pointer(obj);
1278 1425 assert(new_obj != NULL || // is forwarding ptr?
1279 1426 obj->is_shared(), // never forwarded?
1280 1427 "should be forwarded");
1281 1428 // Just always do the update unconditionally?
1282 1429 if (new_obj != NULL) {
1283 1430 assert(Universe::heap()->is_in_reserved(new_obj),
1284 1431 "should be in object space");
1285 1432 oopDesc::encode_store_heap_oop_not_null(p, new_obj);
1286 1433 }
1287 1434 }
1288 1435 VALIDATE_MARK_SWEEP_ONLY(track_adjusted_pointer(p, isroot));
1289 1436 }
1290 1437
1291 1438 template <class T>
1292 1439 inline void PSParallelCompact::KeepAliveClosure::do_oop_work(T* p) {
1293 1440 #ifdef VALIDATE_MARK_SWEEP
1294 1441 if (ValidateMarkSweep) {
1295 1442 if (!Universe::heap()->is_in_reserved(p)) {
1296 1443 _root_refs_stack->push(p);
1297 1444 } else {
1298 1445 _other_refs_stack->push(p);
1299 1446 }
1300 1447 }
1301 1448 #endif
1302 1449 mark_and_push(_compaction_manager, p);
1303 1450 }
1304 1451
1305 1452 inline bool PSParallelCompact::print_phases() {
1306 1453 return _print_phases;
1307 1454 }
1308 1455
1309 1456 inline double PSParallelCompact::normal_distribution(double density) {
1310 1457 assert(_dwl_initialized, "uninitialized");
1311 1458 const double squared_term = (density - _dwl_mean) / _dwl_std_dev;
1312 1459 return _dwl_first_term * exp(-0.5 * squared_term * squared_term);
1313 1460 }
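// This evaluates a normal (Gaussian) density at the given point:
//
//   f(density) = _dwl_first_term * exp(-0.5 * ((density - _dwl_mean) /
//                                              _dwl_std_dev)^2)
//
// where _dwl_first_term is presumably 1.0 / (_dwl_std_dev * sqrt(2.0 * pi)),
// fixed when the deadwood-limiter ("dwl") parameters are initialized.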
1314 1461
1315 1462 inline bool
1316 1463 PSParallelCompact::dead_space_crosses_boundary(const RegionData* region,
1317 1464 idx_t bit)
1318 1465 {
1319 1466 assert(bit > 0, "cannot call this for the first bit/region");
1320 1467 assert(_summary_data.region_to_addr(region) == _mark_bitmap.bit_to_addr(bit),
1321 1468 "sanity check");
1322 1469
1323 1470 // Dead space crosses the boundary if (1) a partial object does not extend
1324 1471 // onto the region, (2) an object does not start at the beginning of the
1325 1472 // region, and (3) an object does not end at the end of the prior region.
1326 1473 return region->partial_obj_size() == 0 &&
1327 1474 !_mark_bitmap.is_obj_beg(bit) &&
1328 1475 !_mark_bitmap.is_obj_end(bit - 1);
1329 1476 }
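//
// For example: an object ends partway through region A and the next live
// object begins partway through the following region B, with nothing
// spanning the boundary. At the bit for the start of B all three conditions
// hold (no partial object extends into B, no object begins at B, and no
// object ends at the last word of A), so the dead run crosses the A/B
// boundary.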
1330 1477
1331 1478 inline bool
1332 1479 PSParallelCompact::is_in(HeapWord* p, HeapWord* beg_addr, HeapWord* end_addr) {
1333 1480 return p >= beg_addr && p < end_addr;
1334 1481 }
1335 1482
1336 1483 inline bool
1337 1484 PSParallelCompact::is_in(oop* p, HeapWord* beg_addr, HeapWord* end_addr) {
1338 1485 return is_in((HeapWord*)p, beg_addr, end_addr);
1339 1486 }
1340 1487
1341 1488 inline MutableSpace* PSParallelCompact::space(SpaceId id) {
1342 1489 assert(id < last_space_id, "id out of range");
1343 1490 return _space_info[id].space();
1344 1491 }
1345 1492
1346 1493 inline HeapWord* PSParallelCompact::new_top(SpaceId id) {
1347 1494 assert(id < last_space_id, "id out of range");
1348 1495 return _space_info[id].new_top();
1349 1496 }
1350 1497
1351 1498 inline HeapWord* PSParallelCompact::dense_prefix(SpaceId id) {
1352 1499 assert(id < last_space_id, "id out of range");
1353 1500 return _space_info[id].dense_prefix();
1354 1501 }
1355 1502
1356 1503 inline ObjectStartArray* PSParallelCompact::start_array(SpaceId id) {
1357 1504 assert(id < last_space_id, "id out of range");
1358 1505 return _space_info[id].start_array();
1359 1506 }
1360 1507
1361 1508 inline bool PSParallelCompact::should_update_klass(klassOop k) {
1362 1509 return ((HeapWord*) k) >= dense_prefix(perm_space_id);
1363 1510 }
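// Rationale: klasses below the perm-gen dense prefix never move, so
// references to them need no adjustment; a klass at or above the dense
// prefix may have been relocated and should be updated.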
1364 1511
1365 1512 #ifdef ASSERT
1366 1513 inline void
1367 1514 PSParallelCompact::check_new_location(HeapWord* old_addr, HeapWord* new_addr)
1368 1515 {
1369 1516 assert(old_addr >= new_addr || space_id(old_addr) != space_id(new_addr),
1370 1517 "must move left or to a different space");
1371 1518 assert(is_object_aligned((intptr_t)old_addr) && is_object_aligned((intptr_t)new_addr),
1372 1519 "checking alignment");
1373 1520 }
1374 1521 #endif // ASSERT
1375 1522
1376 1523 class MoveAndUpdateClosure: public ParMarkBitMapClosure {
1377 1524 public:
1378 1525 inline MoveAndUpdateClosure(ParMarkBitMap* bitmap, ParCompactionManager* cm,
1379 1526 ObjectStartArray* start_array,
1380 1527 HeapWord* destination, size_t words);
1381 1528
1382 1529 // Accessors.
1383 1530 HeapWord* destination() const { return _destination; }
1384 1531
1385 1532 // If the object will fit (size <= words_remaining()), copy it to the current
1386 1533 // destination, update the interior oops and the start array, and return either
1387 1534 // full (if the closure is full) or incomplete. If the object will not fit,
1388 1535 // return would_overflow.
1389 1536 virtual IterationStatus do_addr(HeapWord* addr, size_t size);
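//
// A sketch of how a caller might react to the returned status (simplified;
// the real logic is in the region-filling loop in the .cpp file):
//
//   ParMarkBitMap::IterationStatus status = closure.do_addr(addr, size);
//   if (status == ParMarkBitMap::would_overflow) {
//     closure.copy_partial_obj();   // object straddles the destination
//   } else if (status == ParMarkBitMap::full) {
//     // destination filled; deferred-update bookkeeping happens here
//   }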
1390 1537
1391 1538 // Copy enough words to fill this closure, starting at source(). Interior
1392 1539 // oops and the start array are not updated. Return full.
1393 1540 IterationStatus copy_until_full();
1394 1541
1395 1542 // Copy enough words to fill this closure or to the end of an object,
1396 1543 // whichever is smaller, starting at source(). Interior oops and the start
1397 1544 // array are not updated.
1398 1545 void copy_partial_obj();
1399 1546
1400 1547 protected:
1401 1548 // Update variables to indicate that word_count words were processed.
1402 1549 inline void update_state(size_t word_count);
1403 1550
1404 1551 protected:
1405 1552 ObjectStartArray* const _start_array;
1406 1553 HeapWord* _destination; // Next addr to be written.
1407 1554 };
1408 1555
1409 1556 inline
1410 1557 MoveAndUpdateClosure::MoveAndUpdateClosure(ParMarkBitMap* bitmap,
1411 1558 ParCompactionManager* cm,
1412 1559 ObjectStartArray* start_array,
1413 1560 HeapWord* destination,
1414 1561 size_t words) :
1415 1562 ParMarkBitMapClosure(bitmap, cm, words), _start_array(start_array)
1416 1563 {
1417 1564 _destination = destination;
1418 1565 }
1419 1566
1420 1567 inline void MoveAndUpdateClosure::update_state(size_t words)
1421 1568 {
1422 1569 decrement_words_remaining(words);
1423 1570 _source += words;
1424 1571 _destination += words;
1425 1572 }
1426 1573
1427 1574 class UpdateOnlyClosure: public ParMarkBitMapClosure {
1428 1575 private:
1429 1576 const PSParallelCompact::SpaceId _space_id;
1430 1577 ObjectStartArray* const _start_array;
1431 1578
1432 1579 public:
1433 1580 UpdateOnlyClosure(ParMarkBitMap* mbm,
1434 1581 ParCompactionManager* cm,
1435 1582 PSParallelCompact::SpaceId space_id);
1436 1583
1437 1584 // Update the object.
1438 1585 virtual IterationStatus do_addr(HeapWord* addr, size_t words);
1439 1586
1440 1587 inline void do_addr(HeapWord* addr);
1441 1588 };
1442 1589
1443 1590 inline void UpdateOnlyClosure::do_addr(HeapWord* addr)
1444 1591 {
1445 1592 _start_array->allocate_block(addr);
1446 1593 oop(addr)->update_contents(compaction_manager());
1447 1594 }
1448 1595
1449 1596 class FillClosure: public ParMarkBitMapClosure
1450 1597 {
1451 1598 public:
1452 1599 FillClosure(ParCompactionManager* cm, PSParallelCompact::SpaceId space_id) :
1453 1600 ParMarkBitMapClosure(PSParallelCompact::mark_bitmap(), cm),
1454 1601 _start_array(PSParallelCompact::start_array(space_id))
1455 1602 {
1456 1603 assert(space_id == PSParallelCompact::perm_space_id ||
1457 1604 space_id == PSParallelCompact::old_space_id,
1458 1605 "cannot use FillClosure in the young gen");
1459 1606 }
1460 1607
1461 1608 virtual IterationStatus do_addr(HeapWord* addr, size_t size) {
1462 1609 CollectedHeap::fill_with_objects(addr, size);
1463 1610 HeapWord* const end = addr + size;
1464 1611 do {
1465 1612 _start_array->allocate_block(addr);
1466 1613 addr += oop(addr)->size();
1467 1614 } while (addr < end);
1468 1615 return ParMarkBitMap::incomplete;
1469 1616 }
1470 1617
1471 1618 private:
1472 1619 ObjectStartArray* const _start_array;
1473 1620 };
1474 1621
1475 1622 #endif // SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSPARALLELCOMPACT_HPP