rev 4520 : 7066063: CMS: "Conservation Principle" assert failed
Summary: Add call to coalBirth() in CompactibleFreeListSpace::reset()
Reviewed-by: ysr, jmasa
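Background for the fix: the CMS free list census records, per chunk size, births and deaths due to coalescing and splitting, and the "Conservation Principle" assert checks, roughly, that the count carried over from the previous sweep plus recorded births minus recorded deaths matches the chunks actually on the free lists. reset() rebuilds the space as one large free chunk after compaction, so the birth of that chunk must be recorded via coalBirth(mr.word_size()); otherwise the census lags the real free lists and the assert can fire. A minimal C++ sketch of that invariant follows; the names SizeCensus and conserved() are invented for illustration and are not the actual HotSpot AllocationStats API.

    struct SizeCensus {
      long prev_count;    // chunks of this size at the end of the previous sweep
      long coal_births;   // chunks created by coalescing (or a wholesale reset)
      long coal_deaths;   // chunks consumed by coalescing
      long split_births;  // chunks created by splitting a larger chunk
      long split_deaths;  // chunks split up into smaller chunks

      // Conservation check: the bookkeeping must account for every chunk
      // currently on the free list of this size.
      bool conserved(long current_count) const {
        return prev_count + coal_births + split_births
                          - coal_deaths - split_deaths == current_count;
      }
    };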
--- old/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp
+++ new/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp
1 1 /*
2 2 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 4 *
5 5 * This code is free software; you can redistribute it and/or modify it
6 6 * under the terms of the GNU General Public License version 2 only, as
7 7 * published by the Free Software Foundation.
8 8 *
9 9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 12 * version 2 for more details (a copy is included in the LICENSE file that
13 13 * accompanied this code).
14 14 *
15 15 * You should have received a copy of the GNU General Public License version
16 16 * 2 along with this work; if not, write to the Free Software Foundation,
17 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 18 *
19 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 20 * or visit www.oracle.com if you need additional information or have any
21 21 * questions.
22 22 *
23 23 */
24 24
25 25 #include "precompiled.hpp"
26 26 #include "gc_implementation/concurrentMarkSweep/cmsLockVerifier.hpp"
27 27 #include "gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp"
28 28 #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.inline.hpp"
29 29 #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp"
30 30 #include "gc_implementation/shared/liveRange.hpp"
31 31 #include "gc_implementation/shared/spaceDecorator.hpp"
32 32 #include "gc_interface/collectedHeap.hpp"
33 33 #include "memory/allocation.inline.hpp"
34 34 #include "memory/blockOffsetTable.inline.hpp"
35 35 #include "memory/resourceArea.hpp"
36 36 #include "memory/universe.inline.hpp"
37 37 #include "oops/oop.inline.hpp"
38 38 #include "runtime/globals.hpp"
39 39 #include "runtime/handles.inline.hpp"
40 40 #include "runtime/init.hpp"
41 41 #include "runtime/java.hpp"
42 42 #include "runtime/vmThread.hpp"
43 43 #include "utilities/copy.hpp"
44 44
45 45 /////////////////////////////////////////////////////////////////////////
46 46 //// CompactibleFreeListSpace
47 47 /////////////////////////////////////////////////////////////////////////
48 48
49 49 // highest ranked free list lock rank
50 50 int CompactibleFreeListSpace::_lockRank = Mutex::leaf + 3;
51 51
52 52 // Defaults are 0 so things will break badly if incorrectly initialized.
53 53 size_t CompactibleFreeListSpace::IndexSetStart = 0;
54 54 size_t CompactibleFreeListSpace::IndexSetStride = 0;
55 55
56 56 size_t MinChunkSize = 0;
57 57
58 58 void CompactibleFreeListSpace::set_cms_values() {
59 59 // Set CMS global values
60 60 assert(MinChunkSize == 0, "already set");
61 61
62 62 // MinChunkSize should be a multiple of MinObjAlignment and be large enough
63 63 // for chunks to contain a FreeChunk.
64 64 size_t min_chunk_size_in_bytes = align_size_up(sizeof(FreeChunk), MinObjAlignmentInBytes);
65 65 MinChunkSize = min_chunk_size_in_bytes / BytesPerWord;
66 66
67 67 assert(IndexSetStart == 0 && IndexSetStride == 0, "already set");
68 68 IndexSetStart = MinChunkSize;
69 69 IndexSetStride = MinObjAlignment;
70 70 }
71 71
72 72 // Constructor
73 73 CompactibleFreeListSpace::CompactibleFreeListSpace(BlockOffsetSharedArray* bs,
74 74 MemRegion mr, bool use_adaptive_freelists,
75 75 FreeBlockDictionary<FreeChunk>::DictionaryChoice dictionaryChoice) :
76 76 _dictionaryChoice(dictionaryChoice),
77 77 _adaptive_freelists(use_adaptive_freelists),
78 78 _bt(bs, mr),
79 79 // free list locks are in the range of values taken by _lockRank
80 80 // This range currently is [_leaf+2, _leaf+3]
81 81 // Note: this requires that CFLspace c'tors
82 82 // are called serially in the order in which the locks
83 83 // are acquired in the program text. This is true today.
84 84 _freelistLock(_lockRank--, "CompactibleFreeListSpace._lock", true),
85 85 _parDictionaryAllocLock(Mutex::leaf - 1, // == rank(ExpandHeap_lock) - 1
86 86 "CompactibleFreeListSpace._dict_par_lock", true),
87 87 _rescan_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
88 88 CMSRescanMultiple),
89 89 _marking_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
90 90 CMSConcMarkMultiple),
91 91 _collector(NULL)
92 92 {
93 93 assert(sizeof(FreeChunk) / BytesPerWord <= MinChunkSize,
94 94 "FreeChunk is larger than expected");
95 95 _bt.set_space(this);
96 96 initialize(mr, SpaceDecorator::Clear, SpaceDecorator::Mangle);
97 97 // We have all of "mr", all of which we place in the dictionary
98 98 // as one big chunk. We'll need to decide here which of several
99 99 // possible alternative dictionary implementations to use. For
100 100 // now the choice is easy, since we have only one working
101 101 // implementation, namely, the simple binary tree (splaying
102 102 // temporarily disabled).
103 103 switch (dictionaryChoice) {
104 104 case FreeBlockDictionary<FreeChunk>::dictionarySplayTree:
105 105 case FreeBlockDictionary<FreeChunk>::dictionarySkipList:
106 106 default:
107 107 warning("dictionaryChoice: selected option not understood; using"
108 108 " default BinaryTreeDictionary implementation instead.");
109 109 case FreeBlockDictionary<FreeChunk>::dictionaryBinaryTree:
110 110 _dictionary = new BinaryTreeDictionary<FreeChunk>(mr, use_adaptive_freelists);
111 111 break;
112 112 }
113 113 assert(_dictionary != NULL, "CMS dictionary initialization");
114 114 // The indexed free lists are initially all empty and are lazily
115 115 // filled in on demand. Initialize the array elements to NULL.
116 116 initializeIndexedFreeListArray();
117 117
118 118 // Not using adaptive free lists assumes that allocation is first
119 119 // from the linAB's. Also a cms perm gen which can be compacted
120 120 // has to have the klass's klassKlass allocated at a lower
121 121 // address in the heap than the klass so that the klassKlass is
122 122 // moved to its new location before the klass is moved.
123 123 // Set the _refillSize for the linear allocation blocks
124 124 if (!use_adaptive_freelists) {
125 125 FreeChunk* fc = _dictionary->get_chunk(mr.word_size());
126 126 // The small linAB initially has all the space and will allocate
127 127 // a chunk of any size.
128 128 HeapWord* addr = (HeapWord*) fc;
129 129 _smallLinearAllocBlock.set(addr, fc->size() ,
130 130 1024*SmallForLinearAlloc, fc->size());
131 131 // Note that _unallocated_block is not updated here.
132 132 // Allocations from the linear allocation block should
133 133 // update it.
134 134 } else {
135 135 _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc,
136 136 SmallForLinearAlloc);
137 137 }
138 138 // CMSIndexedFreeListReplenish should be at least 1
139 139 CMSIndexedFreeListReplenish = MAX2((uintx)1, CMSIndexedFreeListReplenish);
140 140 _promoInfo.setSpace(this);
141 141 if (UseCMSBestFit) {
142 142 _fitStrategy = FreeBlockBestFitFirst;
143 143 } else {
144 144 _fitStrategy = FreeBlockStrategyNone;
145 145 }
146 146 check_free_list_consistency();
147 147
148 148 // Initialize locks for parallel case.
149 149
150 150 if (CollectedHeap::use_parallel_gc_threads()) {
151 151 for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
152 152 _indexedFreeListParLocks[i] = new Mutex(Mutex::leaf - 1, // == ExpandHeap_lock - 1
153 153 "a freelist par lock",
154 154 true);
155 155 if (_indexedFreeListParLocks[i] == NULL)
156 156 vm_exit_during_initialization("Could not allocate a par lock");
157 157 DEBUG_ONLY(
158 158 _indexedFreeList[i].set_protecting_lock(_indexedFreeListParLocks[i]);
159 159 )
160 160 }
161 161 _dictionary->set_par_lock(&_parDictionaryAllocLock);
162 162 }
163 163 }
164 164
165 165 // Like CompactibleSpace forward() but always calls cross_threshold() to
166 166 // update the block offset table. Removed initialize_threshold call because
167 167 // CFLS does not use a block offset array for contiguous spaces.
168 168 HeapWord* CompactibleFreeListSpace::forward(oop q, size_t size,
169 169 CompactPoint* cp, HeapWord* compact_top) {
170 170 // q is alive
171 171 // First check if we should switch compaction space
172 172 assert(this == cp->space, "'this' should be current compaction space.");
173 173 size_t compaction_max_size = pointer_delta(end(), compact_top);
174 174 assert(adjustObjectSize(size) == cp->space->adjust_object_size_v(size),
175 175 "virtual adjustObjectSize_v() method is not correct");
176 176 size_t adjusted_size = adjustObjectSize(size);
177 177 assert(compaction_max_size >= MinChunkSize || compaction_max_size == 0,
178 178 "no small fragments allowed");
179 179 assert(minimum_free_block_size() == MinChunkSize,
180 180 "for de-virtualized reference below");
181 181 // Can't leave a nonzero size, residual fragment smaller than MinChunkSize
182 182 if (adjusted_size + MinChunkSize > compaction_max_size &&
183 183 adjusted_size != compaction_max_size) {
184 184 do {
185 185 // switch to next compaction space
186 186 cp->space->set_compaction_top(compact_top);
187 187 cp->space = cp->space->next_compaction_space();
188 188 if (cp->space == NULL) {
189 189 cp->gen = GenCollectedHeap::heap()->prev_gen(cp->gen);
190 190 assert(cp->gen != NULL, "compaction must succeed");
191 191 cp->space = cp->gen->first_compaction_space();
192 192 assert(cp->space != NULL, "generation must have a first compaction space");
193 193 }
194 194 compact_top = cp->space->bottom();
195 195 cp->space->set_compaction_top(compact_top);
196 196 // The correct adjusted_size may not be the same as that for this method
197 197 // (i.e., cp->space may no longer be "this"), so adjust the size again.
198 198 // Use the virtual method which is not used above to save the virtual
199 199 // dispatch.
200 200 adjusted_size = cp->space->adjust_object_size_v(size);
201 201 compaction_max_size = pointer_delta(cp->space->end(), compact_top);
202 202 assert(cp->space->minimum_free_block_size() == 0, "just checking");
203 203 } while (adjusted_size > compaction_max_size);
204 204 }
205 205
206 206 // store the forwarding pointer into the mark word
207 207 if ((HeapWord*)q != compact_top) {
208 208 q->forward_to(oop(compact_top));
209 209 assert(q->is_gc_marked(), "encoding the pointer should preserve the mark");
210 210 } else {
211 211 // if the object isn't moving we can just set the mark to the default
212 212 // mark and handle it specially later on.
213 213 q->init_mark();
214 214 assert(q->forwardee() == NULL, "should be forwarded to NULL");
215 215 }
216 216
217 217 VALIDATE_MARK_SWEEP_ONLY(MarkSweep::register_live_oop(q, adjusted_size));
218 218 compact_top += adjusted_size;
219 219
220 220 // we need to update the offset table so that the beginnings of objects can be
221 221 // found during scavenge. Note that we are updating the offset table based on
222 222 // where the object will be once the compaction phase finishes.
223 223
224 224 // Always call cross_threshold(). A contiguous space calls it only when
225 225 // compaction_top exceeds the current threshold, but that restriction does
226 226 // not apply to a non-contiguous space.
227 227 cp->threshold =
228 228 cp->space->cross_threshold(compact_top - adjusted_size, compact_top);
229 229 return compact_top;
230 230 }
231 231
232 232 // A modified copy of OffsetTableContigSpace::cross_threshold() with _offsets -> _bt
233 233 // and use of single_block instead of alloc_block. The name here is not really
234 234 // appropriate - maybe a more general name could be invented for both the
235 235 // contiguous and noncontiguous spaces.
236 236
237 237 HeapWord* CompactibleFreeListSpace::cross_threshold(HeapWord* start, HeapWord* the_end) {
238 238 _bt.single_block(start, the_end);
239 239 return end();
240 240 }
241 241
242 242 // Initialize them to NULL.
243 243 void CompactibleFreeListSpace::initializeIndexedFreeListArray() {
244 244 for (size_t i = 0; i < IndexSetSize; i++) {
245 245 // Note that on platforms where objects are double word aligned,
246 246 // the odd array elements are not used. It is convenient, however,
247 247 // to map directly from the object size to the array element.
248 248 _indexedFreeList[i].reset(IndexSetSize);
249 249 _indexedFreeList[i].set_size(i);
250 250 assert(_indexedFreeList[i].count() == 0, "reset check failed");
251 251 assert(_indexedFreeList[i].head() == NULL, "reset check failed");
252 252 assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
253 253 assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
254 254 }
255 255 }
256 256
257 257 void CompactibleFreeListSpace::resetIndexedFreeListArray() {
258 258 for (size_t i = 1; i < IndexSetSize; i++) {
259 259 assert(_indexedFreeList[i].size() == (size_t) i,
260 260 "Indexed free list sizes are incorrect");
261 261 _indexedFreeList[i].reset(IndexSetSize);
262 262 assert(_indexedFreeList[i].count() == 0, "reset check failed");
263 263 assert(_indexedFreeList[i].head() == NULL, "reset check failed");
264 264 assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
265 265 assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
266 266 }
267 267 }
268 268
269 269 void CompactibleFreeListSpace::reset(MemRegion mr) {
270 270 resetIndexedFreeListArray();
271 271 dictionary()->reset();
272 272 if (BlockOffsetArrayUseUnallocatedBlock) {
273 273 assert(end() == mr.end(), "We are compacting to the bottom of CMS gen");
274 274 // Everything's allocated until proven otherwise.
275 275 _bt.set_unallocated_block(end());
276 276 }
277 277 if (!mr.is_empty()) {
278 278 assert(mr.word_size() >= MinChunkSize, "Chunk size is too small");
279 279 _bt.single_block(mr.start(), mr.word_size());
280 280 FreeChunk* fc = (FreeChunk*) mr.start();
281 281 fc->set_size(mr.word_size());
282 282 if (mr.word_size() >= IndexSetSize ) {
283 283 returnChunkToDictionary(fc);
284 284 } else {
285 285 _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
286 286 _indexedFreeList[mr.word_size()].return_chunk_at_head(fc);
287 287 }
288 + coalBirth(mr.word_size());
288 289 }
289 290 _promoInfo.reset();
290 291 _smallLinearAllocBlock._ptr = NULL;
291 292 _smallLinearAllocBlock._word_size = 0;
292 293 }
293 294
294 295 void CompactibleFreeListSpace::reset_after_compaction() {
295 296 // Reset the space to the new reality - one free chunk.
296 297 MemRegion mr(compaction_top(), end());
297 298 reset(mr);
298 299 // Now refill the linear allocation block(s) if possible.
299 300 if (_adaptive_freelists) {
300 301 refillLinearAllocBlocksIfNeeded();
301 302 } else {
302 303 // Place as much of mr in the linAB as we can get,
303 304 // provided it was big enough to go into the dictionary.
304 305 FreeChunk* fc = dictionary()->find_largest_dict();
305 306 if (fc != NULL) {
306 307 assert(fc->size() == mr.word_size(),
307 308 "Why was the chunk broken up?");
308 309 removeChunkFromDictionary(fc);
309 310 HeapWord* addr = (HeapWord*) fc;
310 311 _smallLinearAllocBlock.set(addr, fc->size() ,
311 312 1024*SmallForLinearAlloc, fc->size());
312 313 // Note that _unallocated_block is not updated here.
313 314 }
314 315 }
315 316 }
316 317
317 318 // Walks the entire dictionary, returning a coterminal
318 319 // chunk, if it exists. Use with caution since it involves
319 320 // a potentially complete walk of a potentially large tree.
320 321 FreeChunk* CompactibleFreeListSpace::find_chunk_at_end() {
321 322
322 323 assert_lock_strong(&_freelistLock);
323 324
324 325 return dictionary()->find_chunk_ends_at(end());
325 326 }
326 327
327 328
328 329 #ifndef PRODUCT
329 330 void CompactibleFreeListSpace::initializeIndexedFreeListArrayReturnedBytes() {
330 331 for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
331 332 _indexedFreeList[i].allocation_stats()->set_returned_bytes(0);
332 333 }
333 334 }
334 335
335 336 size_t CompactibleFreeListSpace::sumIndexedFreeListArrayReturnedBytes() {
336 337 size_t sum = 0;
337 338 for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
338 339 sum += _indexedFreeList[i].allocation_stats()->returned_bytes();
339 340 }
340 341 return sum;
341 342 }
342 343
343 344 size_t CompactibleFreeListSpace::totalCountInIndexedFreeLists() const {
344 345 size_t count = 0;
345 346 for (size_t i = IndexSetStart; i < IndexSetSize; i++) {
346 347 debug_only(
347 348 ssize_t total_list_count = 0;
348 349 for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
349 350 fc = fc->next()) {
350 351 total_list_count++;
351 352 }
352 353 assert(total_list_count == _indexedFreeList[i].count(),
353 354 "Count in list is incorrect");
354 355 )
355 356 count += _indexedFreeList[i].count();
356 357 }
357 358 return count;
358 359 }
359 360
360 361 size_t CompactibleFreeListSpace::totalCount() {
361 362 size_t num = totalCountInIndexedFreeLists();
362 363 num += dictionary()->total_count();
363 364 if (_smallLinearAllocBlock._word_size != 0) {
364 365 num++;
365 366 }
366 367 return num;
367 368 }
368 369 #endif
369 370
370 371 bool CompactibleFreeListSpace::is_free_block(const HeapWord* p) const {
371 372 FreeChunk* fc = (FreeChunk*) p;
372 373 return fc->is_free();
373 374 }
374 375
375 376 size_t CompactibleFreeListSpace::used() const {
376 377 return capacity() - free();
377 378 }
378 379
379 380 size_t CompactibleFreeListSpace::free() const {
380 381 // "MT-safe, but not MT-precise"(TM), if you will: i.e.
381 382 // if you do this while the structures are in flux you
382 383 // may get an approximate answer only; for instance
383 384 // because there is concurrent allocation either
384 385 // directly by mutators or for promotion during a GC.
385 386 // It's "MT-safe", however, in the sense that you are guaranteed
386 387 // not to crash and burn, for instance, because of walking
387 388 // pointers that could disappear as you were walking them.
388 389 // The approximation is because the various components
389 390 // that are read below are not read atomically (and
390 391 // further the computation of totalSizeInIndexedFreeLists()
391 392 // is itself a non-atomic computation). The normal use of
392 393 // this is during a resize operation at the end of GC
393 394 // and at that time you are guaranteed to get the
394 395 // correct actual value. However, for instance, this is
395 396 // also read completely asynchronously by the "perf-sampler"
396 397 // that supports jvmstat, and you are apt to see the values
397 398 // flicker in such cases.
398 399 assert(_dictionary != NULL, "No _dictionary?");
399 400 return (_dictionary->total_chunk_size(DEBUG_ONLY(freelistLock())) +
400 401 totalSizeInIndexedFreeLists() +
401 402 _smallLinearAllocBlock._word_size) * HeapWordSize;
402 403 }
403 404
404 405 size_t CompactibleFreeListSpace::max_alloc_in_words() const {
405 406 assert(_dictionary != NULL, "No _dictionary?");
406 407 assert_locked();
407 408 size_t res = _dictionary->max_chunk_size();
408 409 res = MAX2(res, MIN2(_smallLinearAllocBlock._word_size,
409 410 (size_t) SmallForLinearAlloc - 1));
410 411 // XXX the following could potentially be pretty slow;
411 412 // should one, pessimally for the rare cases when res
412 413 // calculated above is less than IndexSetSize,
413 414 // just return res calculated above? My reasoning was that
414 415 // those cases will be so rare that the extra time spent doesn't
415 416 // really matter....
416 417 // Note: do not change the loop test i >= res + IndexSetStride
417 418 // to i > res below, because i is unsigned and res may be zero.
418 419 for (size_t i = IndexSetSize - 1; i >= res + IndexSetStride;
419 420 i -= IndexSetStride) {
420 421 if (_indexedFreeList[i].head() != NULL) {
421 422 assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
422 423 return i;
423 424 }
424 425 }
425 426 return res;
426 427 }
427 428
428 429 void LinearAllocBlock::print_on(outputStream* st) const {
429 430 st->print_cr(" LinearAllocBlock: ptr = " PTR_FORMAT ", word_size = " SIZE_FORMAT
430 431 ", refillsize = " SIZE_FORMAT ", allocation_size_limit = " SIZE_FORMAT,
431 432 _ptr, _word_size, _refillSize, _allocation_size_limit);
432 433 }
433 434
434 435 void CompactibleFreeListSpace::print_on(outputStream* st) const {
435 436 st->print_cr("COMPACTIBLE FREELIST SPACE");
436 437 st->print_cr(" Space:");
437 438 Space::print_on(st);
438 439
439 440 st->print_cr("promoInfo:");
440 441 _promoInfo.print_on(st);
441 442
442 443 st->print_cr("_smallLinearAllocBlock");
443 444 _smallLinearAllocBlock.print_on(st);
444 445
445 446 // dump_memory_block(_smallLinearAllocBlock->_ptr, 128);
446 447
447 448 st->print_cr(" _fitStrategy = %s, _adaptive_freelists = %s",
448 449 _fitStrategy?"true":"false", _adaptive_freelists?"true":"false");
449 450 }
450 451
451 452 void CompactibleFreeListSpace::print_indexed_free_lists(outputStream* st)
452 453 const {
453 454 reportIndexedFreeListStatistics();
454 455 gclog_or_tty->print_cr("Layout of Indexed Freelists");
455 456 gclog_or_tty->print_cr("---------------------------");
456 457 FreeList<FreeChunk>::print_labels_on(st, "size");
457 458 for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
458 459 _indexedFreeList[i].print_on(gclog_or_tty);
459 460 for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
460 461 fc = fc->next()) {
461 462 gclog_or_tty->print_cr("\t[" PTR_FORMAT "," PTR_FORMAT ") %s",
462 463 fc, (HeapWord*)fc + i,
463 464 fc->cantCoalesce() ? "\t CC" : "");
464 465 }
465 466 }
466 467 }
467 468
468 469 void CompactibleFreeListSpace::print_promo_info_blocks(outputStream* st)
469 470 const {
470 471 _promoInfo.print_on(st);
471 472 }
472 473
473 474 void CompactibleFreeListSpace::print_dictionary_free_lists(outputStream* st)
474 475 const {
475 476 _dictionary->report_statistics();
476 477 st->print_cr("Layout of Freelists in Tree");
477 478 st->print_cr("---------------------------");
478 479 _dictionary->print_free_lists(st);
479 480 }
480 481
481 482 class BlkPrintingClosure: public BlkClosure {
482 483 const CMSCollector* _collector;
483 484 const CompactibleFreeListSpace* _sp;
484 485 const CMSBitMap* _live_bit_map;
485 486 const bool _post_remark;
486 487 outputStream* _st;
487 488 public:
488 489 BlkPrintingClosure(const CMSCollector* collector,
489 490 const CompactibleFreeListSpace* sp,
490 491 const CMSBitMap* live_bit_map,
491 492 outputStream* st):
492 493 _collector(collector),
493 494 _sp(sp),
494 495 _live_bit_map(live_bit_map),
495 496 _post_remark(collector->abstract_state() > CMSCollector::FinalMarking),
496 497 _st(st) { }
497 498 size_t do_blk(HeapWord* addr);
498 499 };
499 500
500 501 size_t BlkPrintingClosure::do_blk(HeapWord* addr) {
501 502 size_t sz = _sp->block_size_no_stall(addr, _collector);
502 503 assert(sz != 0, "Should always be able to compute a size");
503 504 if (_sp->block_is_obj(addr)) {
504 505 const bool dead = _post_remark && !_live_bit_map->isMarked(addr);
505 506 _st->print_cr(PTR_FORMAT ": %s object of size " SIZE_FORMAT "%s",
506 507 addr,
507 508 dead ? "dead" : "live",
508 509 sz,
509 510 (!dead && CMSPrintObjectsInDump) ? ":" : ".");
510 511 if (CMSPrintObjectsInDump && !dead) {
511 512 oop(addr)->print_on(_st);
512 513 _st->print_cr("--------------------------------------");
513 514 }
514 515 } else { // free block
515 516 _st->print_cr(PTR_FORMAT ": free block of size " SIZE_FORMAT "%s",
516 517 addr, sz, CMSPrintChunksInDump ? ":" : ".");
517 518 if (CMSPrintChunksInDump) {
518 519 ((FreeChunk*)addr)->print_on(_st);
519 520 _st->print_cr("--------------------------------------");
520 521 }
521 522 }
522 523 return sz;
523 524 }
524 525
525 526 void CompactibleFreeListSpace::dump_at_safepoint_with_locks(CMSCollector* c,
526 527 outputStream* st) {
527 528 st->print_cr("\n=========================");
528 529 st->print_cr("Block layout in CMS Heap:");
529 530 st->print_cr("=========================");
530 531 BlkPrintingClosure bpcl(c, this, c->markBitMap(), st);
531 532 blk_iterate(&bpcl);
532 533
533 534 st->print_cr("\n=======================================");
534 535 st->print_cr("Order & Layout of Promotion Info Blocks");
535 536 st->print_cr("=======================================");
536 537 print_promo_info_blocks(st);
537 538
538 539 st->print_cr("\n===========================");
539 540 st->print_cr("Order of Indexed Free Lists");
540 541 st->print_cr("=========================");
541 542 print_indexed_free_lists(st);
542 543
543 544 st->print_cr("\n=================================");
544 545 st->print_cr("Order of Free Lists in Dictionary");
545 546 st->print_cr("=================================");
546 547 print_dictionary_free_lists(st);
547 548 }
548 549
549 550
550 551 void CompactibleFreeListSpace::reportFreeListStatistics() const {
551 552 assert_lock_strong(&_freelistLock);
552 553 assert(PrintFLSStatistics != 0, "Reporting error");
553 554 _dictionary->report_statistics();
554 555 if (PrintFLSStatistics > 1) {
555 556 reportIndexedFreeListStatistics();
556 557 size_t total_size = totalSizeInIndexedFreeLists() +
557 558 _dictionary->total_chunk_size(DEBUG_ONLY(freelistLock()));
558 559 gclog_or_tty->print(" free=%ld frag=%1.4f\n", total_size, flsFrag());
559 560 }
560 561 }
561 562
562 563 void CompactibleFreeListSpace::reportIndexedFreeListStatistics() const {
563 564 assert_lock_strong(&_freelistLock);
564 565 gclog_or_tty->print("Statistics for IndexedFreeLists:\n"
565 566 "--------------------------------\n");
566 567 size_t total_size = totalSizeInIndexedFreeLists();
567 568 size_t free_blocks = numFreeBlocksInIndexedFreeLists();
568 569 gclog_or_tty->print("Total Free Space: %d\n", total_size);
569 570 gclog_or_tty->print("Max Chunk Size: %d\n", maxChunkSizeInIndexedFreeLists());
570 571 gclog_or_tty->print("Number of Blocks: %d\n", free_blocks);
571 572 if (free_blocks != 0) {
572 573 gclog_or_tty->print("Av. Block Size: %d\n", total_size/free_blocks);
573 574 }
574 575 }
575 576
576 577 size_t CompactibleFreeListSpace::numFreeBlocksInIndexedFreeLists() const {
577 578 size_t res = 0;
578 579 for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
579 580 debug_only(
580 581 ssize_t recount = 0;
581 582 for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
582 583 fc = fc->next()) {
583 584 recount += 1;
584 585 }
585 586 assert(recount == _indexedFreeList[i].count(),
586 587 "Incorrect count in list");
587 588 )
588 589 res += _indexedFreeList[i].count();
589 590 }
590 591 return res;
591 592 }
592 593
593 594 size_t CompactibleFreeListSpace::maxChunkSizeInIndexedFreeLists() const {
594 595 for (size_t i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
595 596 if (_indexedFreeList[i].head() != NULL) {
596 597 assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
597 598 return (size_t)i;
598 599 }
599 600 }
600 601 return 0;
601 602 }
602 603
603 604 void CompactibleFreeListSpace::set_end(HeapWord* value) {
604 605 HeapWord* prevEnd = end();
605 606 assert(prevEnd != value, "unnecessary set_end call");
606 607 assert(prevEnd == NULL || !BlockOffsetArrayUseUnallocatedBlock || value >= unallocated_block(),
607 608 "New end is below unallocated block");
608 609 _end = value;
609 610 if (prevEnd != NULL) {
610 611 // Resize the underlying block offset table.
611 612 _bt.resize(pointer_delta(value, bottom()));
612 613 if (value <= prevEnd) {
613 614 assert(!BlockOffsetArrayUseUnallocatedBlock || value >= unallocated_block(),
614 615 "New end is below unallocated block");
615 616 } else {
616 617 // Now, take this new chunk and add it to the free blocks.
617 618 // Note that the BOT has not yet been updated for this block.
618 619 size_t newFcSize = pointer_delta(value, prevEnd);
619 620 // XXX This is REALLY UGLY and should be fixed up. XXX
620 621 if (!_adaptive_freelists && _smallLinearAllocBlock._ptr == NULL) {
621 622 // Mark the boundary of the new block in BOT
622 623 _bt.mark_block(prevEnd, value);
623 624 // put it all in the linAB
624 625 if (ParallelGCThreads == 0) {
625 626 _smallLinearAllocBlock._ptr = prevEnd;
626 627 _smallLinearAllocBlock._word_size = newFcSize;
627 628 repairLinearAllocBlock(&_smallLinearAllocBlock);
628 629 } else { // ParallelGCThreads > 0
629 630 MutexLockerEx x(parDictionaryAllocLock(),
630 631 Mutex::_no_safepoint_check_flag);
631 632 _smallLinearAllocBlock._ptr = prevEnd;
632 633 _smallLinearAllocBlock._word_size = newFcSize;
633 634 repairLinearAllocBlock(&_smallLinearAllocBlock);
634 635 }
635 636 // Births of chunks put into a LinAB are not recorded. Births
636 637 // of chunks as they are allocated out of a LinAB are.
637 638 } else {
638 639 // Add the block to the free lists, if possible coalescing it
639 640 // with the last free block, and update the BOT and census data.
640 641 addChunkToFreeListsAtEndRecordingStats(prevEnd, newFcSize);
641 642 }
642 643 }
643 644 }
644 645 }
645 646
646 647 class FreeListSpace_DCTOC : public Filtering_DCTOC {
647 648 CompactibleFreeListSpace* _cfls;
648 649 CMSCollector* _collector;
649 650 protected:
650 651 // Override.
651 652 #define walk_mem_region_with_cl_DECL(ClosureType) \
652 653 virtual void walk_mem_region_with_cl(MemRegion mr, \
653 654 HeapWord* bottom, HeapWord* top, \
654 655 ClosureType* cl); \
655 656 void walk_mem_region_with_cl_par(MemRegion mr, \
656 657 HeapWord* bottom, HeapWord* top, \
657 658 ClosureType* cl); \
658 659 void walk_mem_region_with_cl_nopar(MemRegion mr, \
659 660 HeapWord* bottom, HeapWord* top, \
660 661 ClosureType* cl)
661 662 walk_mem_region_with_cl_DECL(OopClosure);
662 663 walk_mem_region_with_cl_DECL(FilteringClosure);
663 664
664 665 public:
665 666 FreeListSpace_DCTOC(CompactibleFreeListSpace* sp,
666 667 CMSCollector* collector,
667 668 OopClosure* cl,
668 669 CardTableModRefBS::PrecisionStyle precision,
669 670 HeapWord* boundary) :
670 671 Filtering_DCTOC(sp, cl, precision, boundary),
671 672 _cfls(sp), _collector(collector) {}
672 673 };
673 674
674 675 // We de-virtualize the block-related calls below, since we know that our
675 676 // space is a CompactibleFreeListSpace.
676 677
677 678 #define FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(ClosureType) \
678 679 void FreeListSpace_DCTOC::walk_mem_region_with_cl(MemRegion mr, \
679 680 HeapWord* bottom, \
680 681 HeapWord* top, \
681 682 ClosureType* cl) { \
682 683 bool is_par = SharedHeap::heap()->n_par_threads() > 0; \
683 684 if (is_par) { \
684 685 assert(SharedHeap::heap()->n_par_threads() == \
685 686 SharedHeap::heap()->workers()->active_workers(), "Mismatch"); \
686 687 walk_mem_region_with_cl_par(mr, bottom, top, cl); \
687 688 } else { \
688 689 walk_mem_region_with_cl_nopar(mr, bottom, top, cl); \
689 690 } \
690 691 } \
691 692 void FreeListSpace_DCTOC::walk_mem_region_with_cl_par(MemRegion mr, \
692 693 HeapWord* bottom, \
693 694 HeapWord* top, \
694 695 ClosureType* cl) { \
695 696 /* Skip parts that are before "mr", in case "block_start" sent us \
696 697 back too far. */ \
697 698 HeapWord* mr_start = mr.start(); \
698 699 size_t bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom); \
699 700 HeapWord* next = bottom + bot_size; \
700 701 while (next < mr_start) { \
701 702 bottom = next; \
702 703 bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom); \
703 704 next = bottom + bot_size; \
704 705 } \
705 706 \
706 707 while (bottom < top) { \
707 708 if (_cfls->CompactibleFreeListSpace::block_is_obj(bottom) && \
708 709 !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks( \
709 710 oop(bottom)) && \
710 711 !_collector->CMSCollector::is_dead_obj(oop(bottom))) { \
711 712 size_t word_sz = oop(bottom)->oop_iterate(cl, mr); \
712 713 bottom += _cfls->adjustObjectSize(word_sz); \
713 714 } else { \
714 715 bottom += _cfls->CompactibleFreeListSpace::block_size(bottom); \
715 716 } \
716 717 } \
717 718 } \
718 719 void FreeListSpace_DCTOC::walk_mem_region_with_cl_nopar(MemRegion mr, \
719 720 HeapWord* bottom, \
720 721 HeapWord* top, \
721 722 ClosureType* cl) { \
722 723 /* Skip parts that are before "mr", in case "block_start" sent us \
723 724 back too far. */ \
724 725 HeapWord* mr_start = mr.start(); \
725 726 size_t bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom); \
726 727 HeapWord* next = bottom + bot_size; \
727 728 while (next < mr_start) { \
728 729 bottom = next; \
729 730 bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom); \
730 731 next = bottom + bot_size; \
731 732 } \
732 733 \
733 734 while (bottom < top) { \
734 735 if (_cfls->CompactibleFreeListSpace::block_is_obj_nopar(bottom) && \
735 736 !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks( \
736 737 oop(bottom)) && \
737 738 !_collector->CMSCollector::is_dead_obj(oop(bottom))) { \
738 739 size_t word_sz = oop(bottom)->oop_iterate(cl, mr); \
739 740 bottom += _cfls->adjustObjectSize(word_sz); \
740 741 } else { \
741 742 bottom += _cfls->CompactibleFreeListSpace::block_size_nopar(bottom); \
742 743 } \
743 744 } \
744 745 }
745 746
746 747 // (There are only two of these, rather than N, because the split is due
747 748 // only to the introduction of the FilteringClosure, a local part of the
748 749 // impl of this abstraction.)
749 750 FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(OopClosure)
750 751 FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(FilteringClosure)
751 752
752 753 DirtyCardToOopClosure*
753 754 CompactibleFreeListSpace::new_dcto_cl(OopClosure* cl,
754 755 CardTableModRefBS::PrecisionStyle precision,
755 756 HeapWord* boundary) {
756 757 return new FreeListSpace_DCTOC(this, _collector, cl, precision, boundary);
757 758 }
758 759
759 760
760 761 // Note on locking for the space iteration functions:
761 762 // since the collector's iteration activities are concurrent with
762 763 // allocation activities by mutators, absent a suitable mutual exclusion
763 764 // mechanism the iterators may go awry. For instance a block being iterated
764 765 // may suddenly be allocated or divided up and part of it allocated and
765 766 // so on.
766 767
767 768 // Apply the given closure to each block in the space.
768 769 void CompactibleFreeListSpace::blk_iterate_careful(BlkClosureCareful* cl) {
769 770 assert_lock_strong(freelistLock());
770 771 HeapWord *cur, *limit;
771 772 for (cur = bottom(), limit = end(); cur < limit;
772 773 cur += cl->do_blk_careful(cur));
773 774 }
774 775
775 776 // Apply the given closure to each block in the space.
776 777 void CompactibleFreeListSpace::blk_iterate(BlkClosure* cl) {
777 778 assert_lock_strong(freelistLock());
778 779 HeapWord *cur, *limit;
779 780 for (cur = bottom(), limit = end(); cur < limit;
780 781 cur += cl->do_blk(cur));
781 782 }
782 783
783 784 // Apply the given closure to each oop in the space.
784 785 void CompactibleFreeListSpace::oop_iterate(OopClosure* cl) {
785 786 assert_lock_strong(freelistLock());
786 787 HeapWord *cur, *limit;
787 788 size_t curSize;
788 789 for (cur = bottom(), limit = end(); cur < limit;
789 790 cur += curSize) {
790 791 curSize = block_size(cur);
791 792 if (block_is_obj(cur)) {
792 793 oop(cur)->oop_iterate(cl);
793 794 }
794 795 }
795 796 }
796 797
797 798 // Apply the given closure to each oop in the space \intersect memory region.
798 799 void CompactibleFreeListSpace::oop_iterate(MemRegion mr, OopClosure* cl) {
799 800 assert_lock_strong(freelistLock());
800 801 if (is_empty()) {
801 802 return;
802 803 }
803 804 MemRegion cur = MemRegion(bottom(), end());
804 805 mr = mr.intersection(cur);
805 806 if (mr.is_empty()) {
806 807 return;
807 808 }
808 809 if (mr.equals(cur)) {
809 810 oop_iterate(cl);
810 811 return;
811 812 }
812 813 assert(mr.end() <= end(), "just took an intersection above");
813 814 HeapWord* obj_addr = block_start(mr.start());
814 815 HeapWord* t = mr.end();
815 816
816 817 SpaceMemRegionOopsIterClosure smr_blk(cl, mr);
817 818 if (block_is_obj(obj_addr)) {
818 819 // Handle first object specially.
819 820 oop obj = oop(obj_addr);
820 821 obj_addr += adjustObjectSize(obj->oop_iterate(&smr_blk));
821 822 } else {
822 823 FreeChunk* fc = (FreeChunk*)obj_addr;
823 824 obj_addr += fc->size();
824 825 }
825 826 while (obj_addr < t) {
826 827 HeapWord* obj = obj_addr;
827 828 obj_addr += block_size(obj_addr);
828 829 // If "obj_addr" is not greater than top, then the
829 830 // entire object "obj" is within the region.
830 831 if (obj_addr <= t) {
831 832 if (block_is_obj(obj)) {
832 833 oop(obj)->oop_iterate(cl);
833 834 }
834 835 } else {
835 836 // "obj" extends beyond end of region
836 837 if (block_is_obj(obj)) {
837 838 oop(obj)->oop_iterate(&smr_blk);
838 839 }
839 840 break;
840 841 }
841 842 }
842 843 }
843 844
844 845 // NOTE: In the following methods, in order to safely be able to
845 846 // apply the closure to an object, we need to be sure that the
846 847 // object has been initialized. We are guaranteed that an object
847 848 // is initialized if we are holding the Heap_lock with the
848 849 // world stopped.
849 850 void CompactibleFreeListSpace::verify_objects_initialized() const {
850 851 if (is_init_completed()) {
851 852 assert_locked_or_safepoint(Heap_lock);
852 853 if (Universe::is_fully_initialized()) {
853 854 guarantee(SafepointSynchronize::is_at_safepoint(),
854 855 "Required for objects to be initialized");
855 856 }
856 857 } // else make a concession at vm start-up
857 858 }
858 859
859 860 // Apply the given closure to each object in the space
860 861 void CompactibleFreeListSpace::object_iterate(ObjectClosure* blk) {
861 862 assert_lock_strong(freelistLock());
862 863 NOT_PRODUCT(verify_objects_initialized());
863 864 HeapWord *cur, *limit;
864 865 size_t curSize;
865 866 for (cur = bottom(), limit = end(); cur < limit;
866 867 cur += curSize) {
867 868 curSize = block_size(cur);
868 869 if (block_is_obj(cur)) {
869 870 blk->do_object(oop(cur));
870 871 }
871 872 }
872 873 }
873 874
874 875 // Apply the given closure to each live object in the space
875 876 // The usage of CompactibleFreeListSpace
876 877 // by the ConcurrentMarkSweepGeneration for concurrent GC's allows
877 878 // objects in the space with references to objects that are no longer
878 879 // valid. For example, an object may reference another object
879 880 // that has already been swept up (collected). This method uses
880 881 // obj_is_alive() to determine whether it is safe to apply the closure to
881 882 // an object. See obj_is_alive() for details on how liveness of an
882 883 // object is decided.
883 884
884 885 void CompactibleFreeListSpace::safe_object_iterate(ObjectClosure* blk) {
885 886 assert_lock_strong(freelistLock());
886 887 NOT_PRODUCT(verify_objects_initialized());
887 888 HeapWord *cur, *limit;
888 889 size_t curSize;
889 890 for (cur = bottom(), limit = end(); cur < limit;
890 891 cur += curSize) {
891 892 curSize = block_size(cur);
892 893 if (block_is_obj(cur) && obj_is_alive(cur)) {
893 894 blk->do_object(oop(cur));
894 895 }
895 896 }
896 897 }
897 898
898 899 void CompactibleFreeListSpace::object_iterate_mem(MemRegion mr,
899 900 UpwardsObjectClosure* cl) {
900 901 assert_locked(freelistLock());
901 902 NOT_PRODUCT(verify_objects_initialized());
902 903 Space::object_iterate_mem(mr, cl);
903 904 }
904 905
905 906 // Callers of this iterator beware: The closure application should
906 907 // be robust in the face of uninitialized objects and should (always)
907 908 // return a correct size so that the next addr + size below gives us a
908 909 // valid block boundary. [See for instance,
909 910 // ScanMarkedObjectsAgainCarefullyClosure::do_object_careful()
910 911 // in ConcurrentMarkSweepGeneration.cpp.]
911 912 HeapWord*
912 913 CompactibleFreeListSpace::object_iterate_careful(ObjectClosureCareful* cl) {
913 914 assert_lock_strong(freelistLock());
914 915 HeapWord *addr, *last;
915 916 size_t size;
916 917 for (addr = bottom(), last = end();
917 918 addr < last; addr += size) {
918 919 FreeChunk* fc = (FreeChunk*)addr;
919 920 if (fc->is_free()) {
920 921 // Since we hold the free list lock, which protects direct
921 922 // allocation in this generation by mutators, a free object
922 923 // will remain free throughout this iteration code.
923 924 size = fc->size();
924 925 } else {
925 926 // Note that the object need not necessarily be initialized,
926 927 // because (for instance) the free list lock does NOT protect
927 928 // object initialization. The closure application below must
928 929 // therefore be correct in the face of uninitialized objects.
929 930 size = cl->do_object_careful(oop(addr));
930 931 if (size == 0) {
931 932 // An unparsable object found. Signal early termination.
932 933 return addr;
933 934 }
934 935 }
935 936 }
936 937 return NULL;
937 938 }
938 939
939 940 // Callers of this iterator beware: The closure application should
940 941 // be robust in the face of uninitialized objects and should (always)
941 942 // return a correct size so that the next addr + size below gives us a
942 943 // valid block boundary. [See for instance,
943 944 // ScanMarkedObjectsAgainCarefullyClosure::do_object_careful()
944 945 // in ConcurrentMarkSweepGeneration.cpp.]
945 946 HeapWord*
946 947 CompactibleFreeListSpace::object_iterate_careful_m(MemRegion mr,
947 948 ObjectClosureCareful* cl) {
948 949 assert_lock_strong(freelistLock());
949 950 // Can't use used_region() below because it may not necessarily
950 951 // be the same as [bottom(),end()); although we could
951 952 // use [used_region().start(),round_to(used_region().end(),CardSize)),
952 953 // that appears too cumbersome, so we just do the simpler check
953 954 // in the assertion below.
954 955 assert(!mr.is_empty() && MemRegion(bottom(),end()).contains(mr),
955 956 "mr should be non-empty and within used space");
956 957 HeapWord *addr, *end;
957 958 size_t size;
958 959 for (addr = block_start_careful(mr.start()), end = mr.end();
959 960 addr < end; addr += size) {
960 961 FreeChunk* fc = (FreeChunk*)addr;
961 962 if (fc->is_free()) {
962 963 // Since we hold the free list lock, which protects direct
963 964 // allocation in this generation by mutators, a free object
964 965 // will remain free throughout this iteration code.
965 966 size = fc->size();
966 967 } else {
967 968 // Note that the object need not necessarily be initialized,
968 969 // because (for instance) the free list lock does NOT protect
969 970 // object initialization. The closure application below must
970 971 // therefore be correct in the face of uninitialized objects.
971 972 size = cl->do_object_careful_m(oop(addr), mr);
972 973 if (size == 0) {
973 974 // An unparsable object found. Signal early termination.
974 975 return addr;
975 976 }
976 977 }
977 978 }
978 979 return NULL;
979 980 }
980 981
981 982
982 983 HeapWord* CompactibleFreeListSpace::block_start_const(const void* p) const {
983 984 NOT_PRODUCT(verify_objects_initialized());
984 985 return _bt.block_start(p);
985 986 }
986 987
987 988 HeapWord* CompactibleFreeListSpace::block_start_careful(const void* p) const {
988 989 return _bt.block_start_careful(p);
989 990 }
990 991
991 992 size_t CompactibleFreeListSpace::block_size(const HeapWord* p) const {
992 993 NOT_PRODUCT(verify_objects_initialized());
993 994 // This must be volatile, or else there is a danger that the compiler
994 995 // will compile the code below into a sometimes-infinite loop, by keeping
995 996 // the value read the first time in a register.
996 997 while (true) {
997 998 // We must do this until we get a consistent view of the object.
998 999 if (FreeChunk::indicatesFreeChunk(p)) {
999 1000 volatile FreeChunk* fc = (volatile FreeChunk*)p;
1000 1001 size_t res = fc->size();
1001 1002 // If the object is still a free chunk, return the size, else it
1002 1003 // has been allocated so try again.
1003 1004 if (FreeChunk::indicatesFreeChunk(p)) {
1004 1005 assert(res != 0, "Block size should not be 0");
1005 1006 return res;
1006 1007 }
1007 1008 } else {
1008 1009 // must read from what 'p' points to in each loop.
1009 1010 klassOop k = ((volatile oopDesc*)p)->klass_or_null();
1010 1011 if (k != NULL) {
1011 1012 assert(k->is_oop(true /* ignore mark word */), "Should be klass oop");
1012 1013 oop o = (oop)p;
1013 1014 assert(o->is_parsable(), "Should be parsable");
1014 1015 assert(o->is_oop(true /* ignore mark word */), "Should be an oop.");
1015 1016 size_t res = o->size_given_klass(k->klass_part());
1016 1017 res = adjustObjectSize(res);
1017 1018 assert(res != 0, "Block size should not be 0");
1018 1019 return res;
1019 1020 }
1020 1021 }
1021 1022 }
1022 1023 }
1023 1024
1024 1025 // A variant of the above that uses the Printezis bits for
1025 1026 // unparsable but allocated objects. This avoids any possible
1026 1027 // stalls waiting for mutators to initialize objects, and is
1027 1028 // thus potentially faster than the variant above. However,
1028 1029 // this variant may return a zero size for a block that is
1029 1030 // under mutation and for which a consistent size cannot be
1030 1031 // inferred without stalling; see CMSCollector::block_size_if_printezis_bits().
1031 1032 size_t CompactibleFreeListSpace::block_size_no_stall(HeapWord* p,
1032 1033 const CMSCollector* c)
1033 1034 const {
1034 1035 assert(MemRegion(bottom(), end()).contains(p), "p not in space");
1035 1036 // This must be volatile, or else there is a danger that the compiler
1036 1037 // will compile the code below into a sometimes-infinite loop, by keeping
1037 1038 // the value read the first time in a register.
1038 1039 DEBUG_ONLY(uint loops = 0;)
1039 1040 while (true) {
1040 1041 // We must do this until we get a consistent view of the object.
1041 1042 if (FreeChunk::indicatesFreeChunk(p)) {
1042 1043 volatile FreeChunk* fc = (volatile FreeChunk*)p;
1043 1044 size_t res = fc->size();
1044 1045 if (FreeChunk::indicatesFreeChunk(p)) {
1045 1046 assert(res != 0, "Block size should not be 0");
1046 1047 assert(loops == 0, "Should be 0");
1047 1048 return res;
1048 1049 }
1049 1050 } else {
1050 1051 // must read from what 'p' points to in each loop.
1051 1052 klassOop k = ((volatile oopDesc*)p)->klass_or_null();
1052 1053 // We trust the size of any object that has a non-NULL
1053 1054 // klass and (for those in the perm gen) is parsable
1054 1055 // -- irrespective of its conc_safe-ty.
1055 1056 if (k != NULL && ((oopDesc*)p)->is_parsable()) {
1056 1057 assert(k->is_oop(), "Should really be klass oop.");
1057 1058 oop o = (oop)p;
1058 1059 assert(o->is_oop(), "Should be an oop");
1059 1060 size_t res = o->size_given_klass(k->klass_part());
1060 1061 res = adjustObjectSize(res);
1061 1062 assert(res != 0, "Block size should not be 0");
1062 1063 return res;
1063 1064 } else {
1064 1065 // May return 0 if P-bits not present.
1065 1066 return c->block_size_if_printezis_bits(p);
1066 1067 }
1067 1068 }
1068 1069 assert(loops == 0, "Can loop at most once");
1069 1070 DEBUG_ONLY(loops++;)
1070 1071 }
1071 1072 }
1072 1073
1073 1074 size_t CompactibleFreeListSpace::block_size_nopar(const HeapWord* p) const {
1074 1075 NOT_PRODUCT(verify_objects_initialized());
1075 1076 assert(MemRegion(bottom(), end()).contains(p), "p not in space");
1076 1077 FreeChunk* fc = (FreeChunk*)p;
1077 1078 if (fc->is_free()) {
1078 1079 return fc->size();
1079 1080 } else {
1080 1081 // Ignore mark word because this may be a recently promoted
1081 1082 // object whose mark word is used to chain together grey
1082 1083 // objects (the last one would have a null value).
1083 1084 assert(oop(p)->is_oop(true), "Should be an oop");
1084 1085 return adjustObjectSize(oop(p)->size());
1085 1086 }
1086 1087 }
1087 1088
1088 1089 // This implementation assumes that the property of "being an object" is
1089 1090 // stable. But being a free chunk may not be (because of parallel
1090 1091 // promotion.)
1091 1092 bool CompactibleFreeListSpace::block_is_obj(const HeapWord* p) const {
1092 1093 FreeChunk* fc = (FreeChunk*)p;
1093 1094 assert(is_in_reserved(p), "Should be in space");
1094 1095 // When doing a mark-sweep-compact of the CMS generation, this
1095 1096 // assertion may fail because prepare_for_compaction() uses
1096 1097 // space that is garbage to maintain information on ranges of
1097 1098 // live objects so that these live ranges can be moved as a whole.
1098 1099 // Comment out this assertion until that problem can be solved
1099 1100 // (i.e., that the block start calculation may look at objects
1100 1101 // at addresses below "p" in finding the object that contains "p"
1101 1102 // and those objects (if garbage) may have been modified to hold
1102 1103 // live range information.)
1103 1104 // assert(CollectedHeap::use_parallel_gc_threads() || _bt.block_start(p) == p,
1104 1105 // "Should be a block boundary");
1105 1106 if (FreeChunk::indicatesFreeChunk(p)) return false;
1106 1107 klassOop k = oop(p)->klass_or_null();
1107 1108 if (k != NULL) {
1108 1109 // Ignore mark word because it may have been used to
1109 1110 // chain together promoted objects (the last one
1110 1111 // would have a null value).
1111 1112 assert(oop(p)->is_oop(true), "Should be an oop");
1112 1113 return true;
1113 1114 } else {
1114 1115 return false; // Was not an object at the start of collection.
1115 1116 }
1116 1117 }
1117 1118
1118 1119 // Check if the object is alive. This fact is checked either by consulting
1119 1120 // the main marking bitmap in the sweeping phase or, if it's a permanent
1120 1121 // generation and we're not in the sweeping phase, by checking the
1121 1122 // perm_gen_verify_bit_map where we store the "deadness" information if
1122 1123 // we did not sweep the perm gen in the most recent previous GC cycle.
1123 1124 bool CompactibleFreeListSpace::obj_is_alive(const HeapWord* p) const {
1124 1125 assert(SafepointSynchronize::is_at_safepoint() || !is_init_completed(),
1125 1126 "Else races are possible");
1126 1127 assert(block_is_obj(p), "The address should point to an object");
1127 1128
1128 1129 // If we're sweeping, we use object liveness information from the main bit map
1129 1130 // for both perm gen and old gen.
1130 1131 // We don't need to lock the bitmap (live_map or dead_map below), because
1131 1132 // EITHER we are in the middle of the sweeping phase, and the
1132 1133 // main marking bit map (live_map below) is locked,
1133 1134 // OR we're in other phases and perm_gen_verify_bit_map (dead_map below)
1134 1135 // is stable, because it's mutated only in the sweeping phase.
1135 1136 // NOTE: This method is also used by jmap where, if class unloading is
1136 1137 // off, the results can return "false" for legitimate perm objects,
1137 1138 // when we are not in the midst of a sweeping phase, which can result
1138 1139 // in jmap not reporting certain perm gen objects. This will be moot
1139 1140 // if/when the perm gen goes away in the future.
1140 1141 if (_collector->abstract_state() == CMSCollector::Sweeping) {
1141 1142 CMSBitMap* live_map = _collector->markBitMap();
1142 1143 return live_map->par_isMarked((HeapWord*) p);
1143 1144 } else {
1144 1145 // If we're not currently sweeping and we haven't swept the perm gen in
1145 1146 // the previous concurrent cycle then we may have dead but unswept objects
1146 1147 // in the perm gen. In this case, we use the "deadness" information
1147 1148 // that we had saved in perm_gen_verify_bit_map at the last sweep.
1148 1149 if (!CMSClassUnloadingEnabled && _collector->_permGen->reserved().contains(p)) {
1149 1150 if (_collector->verifying()) {
1150 1151 CMSBitMap* dead_map = _collector->perm_gen_verify_bit_map();
1151 1152 // Object is marked in the dead_map bitmap at the previous sweep
1152 1153 // when we know that it's dead; if the bitmap is not allocated then
1153 1154 // the object is alive.
1154 1155 return (dead_map->sizeInBits() == 0) // bit_map has been allocated
1155 1156 || !dead_map->par_isMarked((HeapWord*) p);
1156 1157 } else {
1157 1158 return false; // We can't say for sure if it's live, so we say that it's dead.
1158 1159 }
1159 1160 }
1160 1161 }
1161 1162 return true;
1162 1163 }
1163 1164
1164 1165 bool CompactibleFreeListSpace::block_is_obj_nopar(const HeapWord* p) const {
1165 1166 FreeChunk* fc = (FreeChunk*)p;
1166 1167 assert(is_in_reserved(p), "Should be in space");
1167 1168 assert(_bt.block_start(p) == p, "Should be a block boundary");
1168 1169 if (!fc->is_free()) {
1169 1170 // Ignore mark word because it may have been used to
1170 1171 // chain together promoted objects (the last one
1171 1172 // would have a null value).
1172 1173 assert(oop(p)->is_oop(true), "Should be an oop");
1173 1174 return true;
1174 1175 }
1175 1176 return false;
1176 1177 }
1177 1178
1178 1179 // "MT-safe but not guaranteed MT-precise" (TM); you may get an
1179 1180 // approximate answer if you don't hold the freelistLock when you call this.
1180 1181 size_t CompactibleFreeListSpace::totalSizeInIndexedFreeLists() const {
1181 1182 size_t size = 0;
1182 1183 for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
1183 1184 debug_only(
1184 1185 // We may be calling here without the lock in which case we
1185 1186 // won't do this modest sanity check.
1186 1187 if (freelistLock()->owned_by_self()) {
1187 1188 size_t total_list_size = 0;
1188 1189 for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
1189 1190 fc = fc->next()) {
1190 1191 total_list_size += i;
1191 1192 }
1192 1193 assert(total_list_size == i * _indexedFreeList[i].count(),
1193 1194 "Count in list is incorrect");
1194 1195 }
1195 1196 )
1196 1197 size += i * _indexedFreeList[i].count();
1197 1198 }
1198 1199 return size;
1199 1200 }
1200 1201
1201 1202 HeapWord* CompactibleFreeListSpace::par_allocate(size_t size) {
1202 1203 MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
1203 1204 return allocate(size);
1204 1205 }
1205 1206
1206 1207 HeapWord*
1207 1208 CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlockRemainder(size_t size) {
1208 1209 return getChunkFromLinearAllocBlockRemainder(&_smallLinearAllocBlock, size);
1209 1210 }
1210 1211
1211 1212 HeapWord* CompactibleFreeListSpace::allocate(size_t size) {
1212 1213 assert_lock_strong(freelistLock());
1213 1214 HeapWord* res = NULL;
1214 1215 assert(size == adjustObjectSize(size),
1215 1216 "use adjustObjectSize() before calling into allocate()");
1216 1217
1217 1218 if (_adaptive_freelists) {
1218 1219 res = allocate_adaptive_freelists(size);
1219 1220 } else { // non-adaptive free lists
1220 1221 res = allocate_non_adaptive_freelists(size);
1221 1222 }
1222 1223
1223 1224 if (res != NULL) {
1224 1225 // check that res does lie in this space!
1225 1226 assert(is_in_reserved(res), "Not in this space!");
1226 1227 assert(is_aligned((void*)res), "alignment check");
1227 1228
1228 1229 FreeChunk* fc = (FreeChunk*)res;
1229 1230 fc->markNotFree();
1230 1231 assert(!fc->is_free(), "shouldn't be marked free");
1231 1232 assert(oop(fc)->klass_or_null() == NULL, "should look uninitialized");
1232 1233 // Verify that the block offset table shows this to
1233 1234 // be a single block, but not one which is unallocated.
1234 1235 _bt.verify_single_block(res, size);
1235 1236 _bt.verify_not_unallocated(res, size);
1236 1237 // mangle a just allocated object with a distinct pattern.
1237 1238 debug_only(fc->mangleAllocated(size));
1238 1239 }
1239 1240
1240 1241 return res;
1241 1242 }
1242 1243
1243 1244 HeapWord* CompactibleFreeListSpace::allocate_non_adaptive_freelists(size_t size) {
1244 1245 HeapWord* res = NULL;
1245 1246 // try and use linear allocation for smaller blocks
1246 1247 if (size < _smallLinearAllocBlock._allocation_size_limit) {
1247 1248 // if successful, the following also adjusts block offset table
1248 1249 res = getChunkFromSmallLinearAllocBlock(size);
1249 1250 }
1250 1251 // Else triage to indexed lists for smaller sizes
1251 1252 if (res == NULL) {
1252 1253 if (size < SmallForDictionary) {
1253 1254 res = (HeapWord*) getChunkFromIndexedFreeList(size);
1254 1255 } else {
1255 1256 // else get it from the big dictionary; if even this doesn't
1256 1257 // work we are out of luck.
1257 1258 res = (HeapWord*)getChunkFromDictionaryExact(size);
1258 1259 }
1259 1260 }
1260 1261
1261 1262 return res;
1262 1263 }
1263 1264
1264 1265 HeapWord* CompactibleFreeListSpace::allocate_adaptive_freelists(size_t size) {
1265 1266 assert_lock_strong(freelistLock());
1266 1267 HeapWord* res = NULL;
1267 1268 assert(size == adjustObjectSize(size),
1268 1269 "use adjustObjectSize() before calling into allocate()");
1269 1270
1270 1271 // Strategy
1271 1272 // if small
1272 1273 // exact size from small object indexed list if small
1273 1274 // small or large linear allocation block (linAB) as appropriate
1274 1275 // take from lists of greater sized chunks
1275 1276 // else
1276 1277 // dictionary
1277 1278 // small or large linear allocation block if it has the space
1278 1279 // Try allocating exact size from indexTable first
1279 1280 if (size < IndexSetSize) {
1280 1281 res = (HeapWord*) getChunkFromIndexedFreeList(size);
1281 1282 if (res != NULL) {
1282 1283 assert(res != (HeapWord*)_indexedFreeList[size].head(),
1283 1284 "Not removed from free list");
1284 1285 // no block offset table adjustment is necessary on blocks in
1285 1286 // the indexed lists.
1286 1287
1287 1288 // Try allocating from the small LinAB
1288 1289 } else if (size < _smallLinearAllocBlock._allocation_size_limit &&
1289 1290 (res = getChunkFromSmallLinearAllocBlock(size)) != NULL) {
1290 1291 // if successful, the above also adjusts block offset table
1291 1292 // Note that this call will refill the LinAB to
1292 1293 // satisfy the request. This is different than
1293 1294 // evm.
1294 1295 // Don't record chunk off a LinAB? smallSplitBirth(size);
1295 1296 } else {
1296 1297 // Raid the exact free lists larger than size, even if they are not
1297 1298 // overpopulated.
1298 1299 res = (HeapWord*) getChunkFromGreater(size);
1299 1300 }
1300 1301 } else {
1301 1302 // Big objects get allocated directly from the dictionary.
1302 1303 res = (HeapWord*) getChunkFromDictionaryExact(size);
1303 1304 if (res == NULL) {
1304 1305 // Try hard not to fail since an allocation failure will likely
1305 1306 // trigger a synchronous GC. Try to get the space from the
1306 1307 // allocation blocks.
1307 1308 res = getChunkFromSmallLinearAllocBlockRemainder(size);
1308 1309 }
1309 1310 }
1310 1311
1311 1312 return res;
1312 1313 }
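// Example walk-through of the strategy above (hypothetical sizes; the
// thresholds IndexSetSize and SmallForDictionary are VM-configuration
// dependent): a 24-word request first tries the exact 24-word indexed list,
// then the small linAB if it can cover the request, then getChunkFromGreater()
// over the larger indexed lists and the dictionary; a 5000-word request goes
// straight to the dictionary and, failing that, falls back to carving the
// request out of the small linAB remainder.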
1313 1314
1314 1315 // A worst-case estimate of the space required (in HeapWords) to expand the heap
1315 1316 // when promoting obj.
1316 1317 size_t CompactibleFreeListSpace::expansionSpaceRequired(size_t obj_size) const {
1317 1318 // Depending on the object size, expansion may require refilling either a
1318 1319 // bigLAB or a smallLAB plus refilling a PromotionInfo object. 2 * MinChunkSize
1319 1320 // is added because the dictionary may over-allocate to avoid fragmentation.
1320 1321 size_t space = obj_size;
1321 1322 if (!_adaptive_freelists) {
1322 1323 space = MAX2(space, _smallLinearAllocBlock._refillSize);
1323 1324 }
1324 1325 space += _promoInfo.refillSize() + 2 * MinChunkSize;
1325 1326 return space;
1326 1327 }
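// Worked example with hypothetical values: for obj_size = 4 words, a
// promotion-info refill of 128 words and MinChunkSize = 4, the
// adaptive-freelist estimate is 4 + 128 + 2 * 4 = 140 words; with
// non-adaptive freelists and a linAB _refillSize of 2048 words the estimate
// becomes 2048 + 128 + 8 = 2184 words, since the linAB refill dominates the
// object size.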
1327 1328
1328 1329 FreeChunk* CompactibleFreeListSpace::getChunkFromGreater(size_t numWords) {
1329 1330 FreeChunk* ret;
1330 1331
1331 1332 assert(numWords >= MinChunkSize, "Size is less than minimum");
1332 1333 assert(linearAllocationWouldFail() || bestFitFirst(),
1333 1334 "Should not be here");
1334 1335
1335 1336 size_t i;
1336 1337 size_t currSize = numWords + MinChunkSize;
1337 1338 assert(currSize % MinObjAlignment == 0, "currSize should be aligned");
1338 1339 for (i = currSize; i < IndexSetSize; i += IndexSetStride) {
1339 1340 FreeList<FreeChunk>* fl = &_indexedFreeList[i];
1340 1341 if (fl->head()) {
1341 1342 ret = getFromListGreater(fl, numWords);
1342 1343 assert(ret == NULL || ret->is_free(), "Should be returning a free chunk");
1343 1344 return ret;
1344 1345 }
1345 1346 }
1346 1347
1347 1348 currSize = MAX2((size_t)SmallForDictionary,
1348 1349 (size_t)(numWords + MinChunkSize));
1349 1350
1350 1351 /* Try to get a chunk that satisfies request, while avoiding
1351 1352 fragmentation that can't be handled. */
1352 1353 {
1353 1354 ret = dictionary()->get_chunk(currSize);
1354 1355 if (ret != NULL) {
1355 1356 assert(ret->size() - numWords >= MinChunkSize,
1356 1357 "Chunk is too small");
1357 1358 _bt.allocated((HeapWord*)ret, ret->size());
1358 1359 /* Carve returned chunk. */
1359 1360 (void) splitChunkAndReturnRemainder(ret, numWords);
1360 1361 /* Label this as no longer a free chunk. */
1361 1362 assert(ret->is_free(), "This chunk should be free");
1362 1363 ret->link_prev(NULL);
1363 1364 }
1364 1365 assert(ret == NULL || ret->is_free(), "Should be returning a free chunk");
1365 1366 return ret;
1366 1367 }
1367 1368 ShouldNotReachHere();
1368 1369 }
1369 1370
1370 1371 bool CompactibleFreeListSpace::verifyChunkInIndexedFreeLists(FreeChunk* fc) const {
1371 1372 assert(fc->size() < IndexSetSize, "Size of chunk is too large");
1372 1373 return _indexedFreeList[fc->size()].verify_chunk_in_free_list(fc);
1373 1374 }
1374 1375
1375 1376 bool CompactibleFreeListSpace::verify_chunk_is_linear_alloc_block(FreeChunk* fc) const {
1376 1377 assert((_smallLinearAllocBlock._ptr != (HeapWord*)fc) ||
1377 1378 (_smallLinearAllocBlock._word_size == fc->size()),
1378 1379 "Linear allocation block shows incorrect size");
1379 1380 return ((_smallLinearAllocBlock._ptr == (HeapWord*)fc) &&
1380 1381 (_smallLinearAllocBlock._word_size == fc->size()));
1381 1382 }
1382 1383
1383 1384 // Check if the purported free chunk is present either as a linear
1384 1385 // allocation block, the size-indexed table of (smaller) free blocks,
1385 1386 // or the larger free blocks kept in the binary tree dictionary.
1386 1387 bool CompactibleFreeListSpace::verify_chunk_in_free_list(FreeChunk* fc) const {
1387 1388 if (verify_chunk_is_linear_alloc_block(fc)) {
1388 1389 return true;
1389 1390 } else if (fc->size() < IndexSetSize) {
1390 1391 return verifyChunkInIndexedFreeLists(fc);
1391 1392 } else {
1392 1393 return dictionary()->verify_chunk_in_free_list(fc);
1393 1394 }
1394 1395 }
1395 1396
1396 1397 #ifndef PRODUCT
1397 1398 void CompactibleFreeListSpace::assert_locked() const {
1398 1399 CMSLockVerifier::assert_locked(freelistLock(), parDictionaryAllocLock());
1399 1400 }
1400 1401
1401 1402 void CompactibleFreeListSpace::assert_locked(const Mutex* lock) const {
1402 1403 CMSLockVerifier::assert_locked(lock);
1403 1404 }
1404 1405 #endif
1405 1406
1406 1407 FreeChunk* CompactibleFreeListSpace::allocateScratch(size_t size) {
1407 1408 // In the parallel case, the main thread holds the free list lock
1408 1409 // on behalf of the parallel threads.
1409 1410 FreeChunk* fc;
1410 1411 {
1411 1412 // If GC is parallel, this might be called by several threads.
1412 1413 // This should be rare enough that the locking overhead won't affect
1413 1414 // the sequential code.
1414 1415 MutexLockerEx x(parDictionaryAllocLock(),
1415 1416 Mutex::_no_safepoint_check_flag);
1416 1417 fc = getChunkFromDictionary(size);
1417 1418 }
1418 1419 if (fc != NULL) {
1419 1420 fc->dontCoalesce();
1420 1421 assert(fc->is_free(), "Should be free, but not coalescable");
1421 1422 // Verify that the block offset table shows this to
1422 1423 // be a single block, but not one which is unallocated.
1423 1424 _bt.verify_single_block((HeapWord*)fc, fc->size());
1424 1425 _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
1425 1426 }
1426 1427 return fc;
1427 1428 }
1428 1429
1429 1430 oop CompactibleFreeListSpace::promote(oop obj, size_t obj_size) {
1430 1431 assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
1431 1432 assert_locked();
1432 1433
1433 1434 // if we are tracking promotions, then first ensure space for
1434 1435 // promotion (including spooling space for saving header if necessary).
1435 1436 // then allocate and copy, then track promoted info if needed.
1436 1437 // When tracking (see PromotionInfo::track()), the mark word may
1437 1438 // be displaced and in this case restoration of the mark word
1438 1439 // occurs in the (oop_since_save_marks_)iterate phase.
1439 1440 if (_promoInfo.tracking() && !_promoInfo.ensure_spooling_space()) {
1440 1441 return NULL;
1441 1442 }
1442 1443 // Call the allocate(size_t, bool) form directly to avoid the
1443 1444 // additional call through the allocate(size_t) form. Having
1444 1445 // the compiler inline the call is problematic because allocate(size_t)
1445 1446 // is a virtual method.
1446 1447 HeapWord* res = allocate(adjustObjectSize(obj_size));
1447 1448 if (res != NULL) {
1448 1449 Copy::aligned_disjoint_words((HeapWord*)obj, res, obj_size);
1449 1450 // if we should be tracking promotions, do so.
1450 1451 if (_promoInfo.tracking()) {
1451 1452 _promoInfo.track((PromotedObject*)res);
1452 1453 }
1453 1454 }
1454 1455 return oop(res);
1455 1456 }
1456 1457
1457 1458 HeapWord*
1458 1459 CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlock(size_t size) {
1459 1460 assert_locked();
1460 1461 assert(size >= MinChunkSize, "minimum chunk size");
1461 1462 assert(size < _smallLinearAllocBlock._allocation_size_limit,
1462 1463 "maximum from smallLinearAllocBlock");
1463 1464 return getChunkFromLinearAllocBlock(&_smallLinearAllocBlock, size);
1464 1465 }
1465 1466
1466 1467 HeapWord*
1467 1468 CompactibleFreeListSpace::getChunkFromLinearAllocBlock(LinearAllocBlock *blk,
1468 1469 size_t size) {
1469 1470 assert_locked();
1470 1471 assert(size >= MinChunkSize, "too small");
1471 1472 HeapWord* res = NULL;
1472 1473 // Try to do linear allocation from blk, making sure that
1473 1474 if (blk->_word_size == 0) {
1474 1475 // We have probably been unable to fill this either in the prologue or
1475 1476 // when it was exhausted at the last linear allocation. Bail out until
1476 1477 // next time.
1477 1478 assert(blk->_ptr == NULL, "consistency check");
1478 1479 return NULL;
1479 1480 }
1480 1481 assert(blk->_word_size != 0 && blk->_ptr != NULL, "consistency check");
1481 1482 res = getChunkFromLinearAllocBlockRemainder(blk, size);
1482 1483 if (res != NULL) return res;
1483 1484
1484 1485 // about to exhaust this linear allocation block
1485 1486 if (blk->_word_size == size) { // exactly satisfied
1486 1487 res = blk->_ptr;
1487 1488 _bt.allocated(res, blk->_word_size);
1488 1489 } else if (size + MinChunkSize <= blk->_refillSize) {
1489 1490 size_t sz = blk->_word_size;
1490 1491 // Update _unallocated_block if the size is such that chunk would be
1491 1492 // returned to the indexed free list. All other chunks in the indexed
1492 1493 // free lists are allocated from the dictionary so that _unallocated_block
1493 1494 // has already been adjusted for them. Do it here so that the cost is
1494 1495 // incurred only for chunks added back to the indexed free lists.
1495 1496 if (sz < SmallForDictionary) {
1496 1497 _bt.allocated(blk->_ptr, sz);
1497 1498 }
1498 1499 // Return the chunk that isn't big enough, and then refill below.
1499 1500 addChunkToFreeLists(blk->_ptr, sz);
1500 1501 split_birth(sz);
1501 1502 // Don't keep statistics on adding back chunk from a LinAB.
1502 1503 } else {
1503 1504 // A refilled block would not satisfy the request.
1504 1505 return NULL;
1505 1506 }
1506 1507
1507 1508 blk->_ptr = NULL; blk->_word_size = 0;
1508 1509 refillLinearAllocBlock(blk);
1509 1510 assert(blk->_ptr == NULL || blk->_word_size >= size + MinChunkSize,
1510 1511 "block was replenished");
1511 1512 if (res != NULL) {
1512 1513 split_birth(size);
1513 1514 repairLinearAllocBlock(blk);
1514 1515 } else if (blk->_ptr != NULL) {
1515 1516 res = blk->_ptr;
1516 1517 size_t blk_size = blk->_word_size;
1517 1518 blk->_word_size -= size;
1518 1519 blk->_ptr += size;
1519 1520 split_birth(size);
1520 1521 repairLinearAllocBlock(blk);
1521 1522 // Update BOT last so that other (parallel) GC threads see a consistent
1522 1523 // view of the BOT and free blocks.
1523 1524 // Above must occur before BOT is updated below.
1524 1525 OrderAccess::storestore();
1525 1526 _bt.split_block(res, blk_size, size); // adjust block offset table
1526 1527 }
1527 1528 return res;
1528 1529 }
1529 1530
1530 1531 HeapWord* CompactibleFreeListSpace::getChunkFromLinearAllocBlockRemainder(
1531 1532 LinearAllocBlock* blk,
1532 1533 size_t size) {
1533 1534 assert_locked();
1534 1535 assert(size >= MinChunkSize, "too small");
1535 1536
1536 1537 HeapWord* res = NULL;
1537 1538 // This is the common case. Keep it simple.
1538 1539 if (blk->_word_size >= size + MinChunkSize) {
1539 1540 assert(blk->_ptr != NULL, "consistency check");
1540 1541 res = blk->_ptr;
1541 1542 // Note that the BOT is up-to-date for the linAB before allocation. It
1542 1543 // indicates the start of the linAB. The split_block() updates the
1543 1544 // BOT for the linAB after the allocation (indicates the start of the
1544 1545 // next chunk to be allocated).
1545 1546 size_t blk_size = blk->_word_size;
1546 1547 blk->_word_size -= size;
1547 1548 blk->_ptr += size;
1548 1549 split_birth(size);
1549 1550 repairLinearAllocBlock(blk);
1550 1551 // Update BOT last so that other (parallel) GC threads see a consistent
1551 1552 // view of the BOT and free blocks.
1552 1553 // Above must occur before BOT is updated below.
1553 1554 OrderAccess::storestore();
1554 1555 _bt.split_block(res, blk_size, size); // adjust block offset table
1555 1556 _bt.allocated(res, size);
1556 1557 }
1557 1558 return res;
1558 1559 }
1559 1560
1560 1561 FreeChunk*
1561 1562 CompactibleFreeListSpace::getChunkFromIndexedFreeList(size_t size) {
1562 1563 assert_locked();
1563 1564 assert(size < SmallForDictionary, "just checking");
1564 1565 FreeChunk* res;
1565 1566 res = _indexedFreeList[size].get_chunk_at_head();
1566 1567 if (res == NULL) {
1567 1568 res = getChunkFromIndexedFreeListHelper(size);
1568 1569 }
1569 1570 _bt.verify_not_unallocated((HeapWord*) res, size);
1570 1571 assert(res == NULL || res->size() == size, "Incorrect block size");
1571 1572 return res;
1572 1573 }
1573 1574
1574 1575 FreeChunk*
1575 1576 CompactibleFreeListSpace::getChunkFromIndexedFreeListHelper(size_t size,
1576 1577 bool replenish) {
1577 1578 assert_locked();
1578 1579 FreeChunk* fc = NULL;
1579 1580 if (size < SmallForDictionary) {
1580 1581 assert(_indexedFreeList[size].head() == NULL ||
1581 1582 _indexedFreeList[size].surplus() <= 0,
1582 1583 "List for this size should be empty or under populated");
1583 1584 // Try best fit in exact lists before replenishing the list
1584 1585 if (!bestFitFirst() || (fc = bestFitSmall(size)) == NULL) {
1585 1586 // Replenish list.
1586 1587 //
1587 1588 // Things tried that failed.
1588 1589 // Tried allocating out of the two LinAB's first before
1589 1590 // replenishing lists.
1590 1591 // Tried small linAB of size 256 (size in indexed list)
1591 1592 // and replenishing indexed lists from the small linAB.
1592 1593 //
1593 1594 FreeChunk* newFc = NULL;
1594 1595 const size_t replenish_size = CMSIndexedFreeListReplenish * size;
1595 1596 if (replenish_size < SmallForDictionary) {
1596 1597 // Do not replenish from an underpopulated size.
1597 1598 if (_indexedFreeList[replenish_size].surplus() > 0 &&
1598 1599 _indexedFreeList[replenish_size].head() != NULL) {
1599 1600 newFc = _indexedFreeList[replenish_size].get_chunk_at_head();
1600 1601 } else if (bestFitFirst()) {
1601 1602 newFc = bestFitSmall(replenish_size);
1602 1603 }
1603 1604 }
1604 1605 if (newFc == NULL && replenish_size > size) {
1605 1606 assert(CMSIndexedFreeListReplenish > 1, "ctl pt invariant");
1606 1607 newFc = getChunkFromIndexedFreeListHelper(replenish_size, false);
1607 1608 }
1608 1609 // Note: The stats update for the split-death of the block obtained above
1609 1610 // is recorded below, precisely when we know we are going to
1610 1611 // be splitting it into more than one piece.
1611 1612 if (newFc != NULL) {
1612 1613 if (replenish || CMSReplenishIntermediate) {
1613 1614 // Replenish this list and return one block to caller.
1614 1615 size_t i;
1615 1616 FreeChunk *curFc, *nextFc;
1616 1617 size_t num_blk = newFc->size() / size;
1617 1618 assert(num_blk >= 1, "Smaller than requested?");
1618 1619 assert(newFc->size() % size == 0, "Should be integral multiple of request");
1619 1620 if (num_blk > 1) {
1620 1621 // we are sure we will be splitting the block just obtained
1621 1622 // into multiple pieces; record the split-death of the original
1622 1623 splitDeath(replenish_size);
1623 1624 }
1624 1625 // carve up and link blocks 0, ..., num_blk - 2
1625 1626 // The last chunk is not added to the lists but is returned as the
1626 1627 // free chunk.
1627 1628 for (curFc = newFc, nextFc = (FreeChunk*)((HeapWord*)curFc + size),
1628 1629 i = 0;
1629 1630 i < (num_blk - 1);
1630 1631 curFc = nextFc, nextFc = (FreeChunk*)((HeapWord*)nextFc + size),
1631 1632 i++) {
1632 1633 curFc->set_size(size);
1633 1634 // Don't record this as a return in order to try and
1634 1635 // determine the "returns" from a GC.
1635 1636 _bt.verify_not_unallocated((HeapWord*) curFc, size);
1636 1637 _indexedFreeList[size].return_chunk_at_tail(curFc, false);
1637 1638 _bt.mark_block((HeapWord*)curFc, size);
1638 1639 split_birth(size);
1639 1640 // Don't record the initial population of the indexed list
1640 1641 // as a split birth.
1641 1642 }
1642 1643
1643 1644 // check that the arithmetic was OK above
1644 1645 assert((HeapWord*)nextFc == (HeapWord*)newFc + num_blk*size,
1645 1646 "inconsistency in carving newFc");
1646 1647 curFc->set_size(size);
1647 1648 _bt.mark_block((HeapWord*)curFc, size);
1648 1649 split_birth(size);
1649 1650 fc = curFc;
1650 1651 } else {
1651 1652 // Return entire block to caller
1652 1653 fc = newFc;
1653 1654 }
1654 1655 }
1655 1656 }
1656 1657 } else {
1657 1658 // Get a free chunk from the free chunk dictionary to be returned to
1658 1659 // replenish the indexed free list.
1659 1660 fc = getChunkFromDictionaryExact(size);
1660 1661 }
1661 1662 // assert(fc == NULL || fc->is_free(), "Should be returning a free chunk");
1662 1663 return fc;
1663 1664 }
1664 1665
1665 1666 FreeChunk*
1666 1667 CompactibleFreeListSpace::getChunkFromDictionary(size_t size) {
1667 1668 assert_locked();
1668 1669 FreeChunk* fc = _dictionary->get_chunk(size);
1669 1670 if (fc == NULL) {
1670 1671 return NULL;
1671 1672 }
1672 1673 _bt.allocated((HeapWord*)fc, fc->size());
1673 1674 if (fc->size() >= size + MinChunkSize) {
1674 1675 fc = splitChunkAndReturnRemainder(fc, size);
1675 1676 }
1676 1677 assert(fc->size() >= size, "chunk too small");
1677 1678 assert(fc->size() < size + MinChunkSize, "chunk too big");
1678 1679 _bt.verify_single_block((HeapWord*)fc, fc->size());
1679 1680 return fc;
1680 1681 }
1681 1682
1682 1683 FreeChunk*
1683 1684 CompactibleFreeListSpace::getChunkFromDictionaryExact(size_t size) {
1684 1685 assert_locked();
1685 1686 FreeChunk* fc = _dictionary->get_chunk(size);
1686 1687 if (fc == NULL) {
1687 1688 return fc;
1688 1689 }
1689 1690 _bt.allocated((HeapWord*)fc, fc->size());
1690 1691 if (fc->size() == size) {
1691 1692 _bt.verify_single_block((HeapWord*)fc, size);
1692 1693 return fc;
1693 1694 }
1694 1695 assert(fc->size() > size, "get_chunk() guarantee");
1695 1696 if (fc->size() < size + MinChunkSize) {
1696 1697 // Return the chunk to the dictionary and go get a bigger one.
1697 1698 returnChunkToDictionary(fc);
1698 1699 fc = _dictionary->get_chunk(size + MinChunkSize);
1699 1700 if (fc == NULL) {
1700 1701 return NULL;
1701 1702 }
1702 1703 _bt.allocated((HeapWord*)fc, fc->size());
1703 1704 }
1704 1705 assert(fc->size() >= size + MinChunkSize, "tautology");
1705 1706 fc = splitChunkAndReturnRemainder(fc, size);
1706 1707 assert(fc->size() == size, "chunk is wrong size");
1707 1708 _bt.verify_single_block((HeapWord*)fc, size);
1708 1709 return fc;
1709 1710 }
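// Worked example (hypothetical sizes, MinChunkSize = 4): an exact request for
// 300 words that gets back a 302-word chunk cannot be split, because the
// 2-word remainder would fall below MinChunkSize; the chunk is therefore
// returned to the dictionary and a chunk of at least 304 words is requested
// instead, which is then split into the exact 300-word block handed to the
// caller and a remainder of at least MinChunkSize that re-enters the free
// lists.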
1710 1711
1711 1712 void
1712 1713 CompactibleFreeListSpace::returnChunkToDictionary(FreeChunk* chunk) {
1713 1714 assert_locked();
1714 1715
1715 1716 size_t size = chunk->size();
1716 1717 _bt.verify_single_block((HeapWord*)chunk, size);
1717 1718 // adjust _unallocated_block downward, as necessary
1718 1719 _bt.freed((HeapWord*)chunk, size);
1719 1720 _dictionary->return_chunk(chunk);
1720 1721 #ifndef PRODUCT
1721 1722 if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
1722 1723 TreeChunk<FreeChunk>::as_TreeChunk(chunk)->list()->verify_stats();
1723 1724 }
1724 1725 #endif // PRODUCT
1725 1726 }
1726 1727
1727 1728 void
1728 1729 CompactibleFreeListSpace::returnChunkToFreeList(FreeChunk* fc) {
1729 1730 assert_locked();
1730 1731 size_t size = fc->size();
1731 1732 _bt.verify_single_block((HeapWord*) fc, size);
1732 1733 _bt.verify_not_unallocated((HeapWord*) fc, size);
1733 1734 if (_adaptive_freelists) {
1734 1735 _indexedFreeList[size].return_chunk_at_tail(fc);
1735 1736 } else {
1736 1737 _indexedFreeList[size].return_chunk_at_head(fc);
1737 1738 }
1738 1739 #ifndef PRODUCT
1739 1740 if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
1740 1741 _indexedFreeList[size].verify_stats();
1741 1742 }
1742 1743 #endif // PRODUCT
1743 1744 }
1744 1745
1745 1746 // Add chunk to end of last block -- if it's the largest
1746 1747 // block -- and update BOT and census data. We would
1747 1748 // of course have preferred to coalesce it with the
1748 1749 // last block, but it's currently less expensive to find the
1749 1750 // largest block than it is to find the last.
1750 1751 void
1751 1752 CompactibleFreeListSpace::addChunkToFreeListsAtEndRecordingStats(
1752 1753 HeapWord* chunk, size_t size) {
1753 1754 // check that the chunk does lie in this space!
1754 1755 assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
1755 1756 // One of the parallel gc task threads may be here
1756 1757 // whilst others are allocating.
1757 1758 Mutex* lock = NULL;
1758 1759 if (ParallelGCThreads != 0) {
1759 1760 lock = &_parDictionaryAllocLock;
1760 1761 }
1761 1762 FreeChunk* ec;
1762 1763 {
1763 1764 MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
1764 1765 ec = dictionary()->find_largest_dict(); // get largest block
1765 1766 if (ec != NULL && ec->end() == chunk) {
1766 1767 // It's a coterminal block - we can coalesce.
1767 1768 size_t old_size = ec->size();
1768 1769 coalDeath(old_size);
1769 1770 removeChunkFromDictionary(ec);
1770 1771 size += old_size;
1771 1772 } else {
1772 1773 ec = (FreeChunk*)chunk;
1773 1774 }
1774 1775 }
1775 1776 ec->set_size(size);
1776 1777 debug_only(ec->mangleFreed(size));
1777 1778 if (size < SmallForDictionary) {
1778 1779 lock = _indexedFreeListParLocks[size];
1779 1780 }
1780 1781 MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
1781 1782 addChunkAndRepairOffsetTable((HeapWord*)ec, size, true);
1782 1783 // record the birth under the lock since the recording involves
1783 1784 // manipulation of the list on which the chunk lives and
1784 1785 // if the chunk is allocated and is the last on the list,
1785 1786 // the list can go away.
1786 1787 coalBirth(size);
1787 1788 }
1788 1789
1789 1790 void
1790 1791 CompactibleFreeListSpace::addChunkToFreeLists(HeapWord* chunk,
1791 1792 size_t size) {
1792 1793 // check that the chunk does lie in this space!
1793 1794 assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
1794 1795 assert_locked();
1795 1796 _bt.verify_single_block(chunk, size);
1796 1797
1797 1798 FreeChunk* fc = (FreeChunk*) chunk;
1798 1799 fc->set_size(size);
1799 1800 debug_only(fc->mangleFreed(size));
1800 1801 if (size < SmallForDictionary) {
1801 1802 returnChunkToFreeList(fc);
1802 1803 } else {
1803 1804 returnChunkToDictionary(fc);
1804 1805 }
1805 1806 }
1806 1807
1807 1808 void
1808 1809 CompactibleFreeListSpace::addChunkAndRepairOffsetTable(HeapWord* chunk,
1809 1810 size_t size, bool coalesced) {
1810 1811 assert_locked();
1811 1812 assert(chunk != NULL, "null chunk");
1812 1813 if (coalesced) {
1813 1814 // repair BOT
1814 1815 _bt.single_block(chunk, size);
1815 1816 }
1816 1817 addChunkToFreeLists(chunk, size);
1817 1818 }
1818 1819
1819 1820 // We _must_ find the purported chunk on our free lists;
1820 1821 // we assert if we don't.
1821 1822 void
1822 1823 CompactibleFreeListSpace::removeFreeChunkFromFreeLists(FreeChunk* fc) {
1823 1824 size_t size = fc->size();
1824 1825 assert_locked();
1825 1826 debug_only(verifyFreeLists());
1826 1827 if (size < SmallForDictionary) {
1827 1828 removeChunkFromIndexedFreeList(fc);
1828 1829 } else {
1829 1830 removeChunkFromDictionary(fc);
1830 1831 }
1831 1832 _bt.verify_single_block((HeapWord*)fc, size);
1832 1833 debug_only(verifyFreeLists());
1833 1834 }
1834 1835
1835 1836 void
1836 1837 CompactibleFreeListSpace::removeChunkFromDictionary(FreeChunk* fc) {
1837 1838 size_t size = fc->size();
1838 1839 assert_locked();
1839 1840 assert(fc != NULL, "null chunk");
1840 1841 _bt.verify_single_block((HeapWord*)fc, size);
1841 1842 _dictionary->remove_chunk(fc);
1842 1843 // adjust _unallocated_block upward, as necessary
1843 1844 _bt.allocated((HeapWord*)fc, size);
1844 1845 }
1845 1846
1846 1847 void
1847 1848 CompactibleFreeListSpace::removeChunkFromIndexedFreeList(FreeChunk* fc) {
1848 1849 assert_locked();
1849 1850 size_t size = fc->size();
1850 1851 _bt.verify_single_block((HeapWord*)fc, size);
1851 1852 NOT_PRODUCT(
1852 1853 if (FLSVerifyIndexTable) {
1853 1854 verifyIndexedFreeList(size);
1854 1855 }
1855 1856 )
1856 1857 _indexedFreeList[size].remove_chunk(fc);
1857 1858 NOT_PRODUCT(
1858 1859 if (FLSVerifyIndexTable) {
1859 1860 verifyIndexedFreeList(size);
1860 1861 }
1861 1862 )
1862 1863 }
1863 1864
1864 1865 FreeChunk* CompactibleFreeListSpace::bestFitSmall(size_t numWords) {
1865 1866 /* A hint is the next larger size that has a surplus.
1866 1867 Start search at a size large enough to guarantee that
1867 1868 the excess is >= MinChunkSize. */
1868 1869 size_t start = align_object_size(numWords + MinChunkSize);
1869 1870 if (start < IndexSetSize) {
1870 1871 FreeList<FreeChunk>* it = _indexedFreeList;
1871 1872 size_t hint = _indexedFreeList[start].hint();
1872 1873 while (hint < IndexSetSize) {
1873 1874 assert(hint % MinObjAlignment == 0, "hint should be aligned");
1874 1875 FreeList<FreeChunk> *fl = &_indexedFreeList[hint];
1875 1876 if (fl->surplus() > 0 && fl->head() != NULL) {
1876 1877 // Found a list with surplus, reset original hint
1877 1878 // and split out a free chunk which is returned.
1878 1879 _indexedFreeList[start].set_hint(hint);
1879 1880 FreeChunk* res = getFromListGreater(fl, numWords);
1880 1881 assert(res == NULL || res->is_free(),
1881 1882 "Should be returning a free chunk");
1882 1883 return res;
1883 1884 }
1884 1885 hint = fl->hint(); /* keep looking */
1885 1886 }
1886 1887 /* None found. */
1887 1888 it[start].set_hint(IndexSetSize);
1888 1889 }
1889 1890 return NULL;
1890 1891 }
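// Example of the hint chain above (hypothetical sizes, ignoring
// IndexSetStride): for numWords = 10 and MinChunkSize = 4 the search starts
// at size 14 (after object-size alignment); if list 14's hint is 20 and list
// 20 has a surplus, a 20-word chunk is removed, a 10-word block is returned
// to the caller, the 10-word remainder re-enters the size-10 list, and list
// 14's hint is re-pointed at 20. If no list along the chain has a surplus,
// the start list's hint is parked at IndexSetSize.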
1891 1892
1892 1893 /* Requires fl->size >= numWords + MinChunkSize */
1893 1894 FreeChunk* CompactibleFreeListSpace::getFromListGreater(FreeList<FreeChunk>* fl,
1894 1895 size_t numWords) {
1895 1896 FreeChunk *curr = fl->head();
1896 1897 size_t oldNumWords = curr->size();
1897 1898 assert(numWords >= MinChunkSize, "Word size is too small");
1898 1899 assert(curr != NULL, "List is empty");
1899 1900 assert(oldNumWords >= numWords + MinChunkSize,
1900 1901 "Size of chunks in the list is too small");
1901 1902
1902 1903 fl->remove_chunk(curr);
1903 1904 // recorded indirectly by splitChunkAndReturnRemainder -
1904 1905 // smallSplit(oldNumWords, numWords);
1905 1906 FreeChunk* new_chunk = splitChunkAndReturnRemainder(curr, numWords);
1906 1907 // Does anything have to be done for the remainder in terms of
1907 1908 // fixing the card table?
1908 1909 assert(new_chunk == NULL || new_chunk->is_free(),
1909 1910 "Should be returning a free chunk");
1910 1911 return new_chunk;
1911 1912 }
1912 1913
1913 1914 FreeChunk*
1914 1915 CompactibleFreeListSpace::splitChunkAndReturnRemainder(FreeChunk* chunk,
1915 1916 size_t new_size) {
1916 1917 assert_locked();
1917 1918 size_t size = chunk->size();
1918 1919 assert(size > new_size, "Split from a smaller block?");
1919 1920 assert(is_aligned(chunk), "alignment problem");
1920 1921 assert(size == adjustObjectSize(size), "alignment problem");
1921 1922 size_t rem_size = size - new_size;
1922 1923 assert(rem_size == adjustObjectSize(rem_size), "alignment problem");
1923 1924 assert(rem_size >= MinChunkSize, "Free chunk smaller than minimum");
1924 1925 FreeChunk* ffc = (FreeChunk*)((HeapWord*)chunk + new_size);
1925 1926 assert(is_aligned(ffc), "alignment problem");
1926 1927 ffc->set_size(rem_size);
1927 1928 ffc->link_next(NULL);
1928 1929 ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
1929 1930 // Above must occur before BOT is updated below.
1930 1931 // adjust block offset table
1931 1932 OrderAccess::storestore();
1932 1933 assert(chunk->is_free() && ffc->is_free(), "Error");
1933 1934 _bt.split_block((HeapWord*)chunk, chunk->size(), new_size);
1934 1935 if (rem_size < SmallForDictionary) {
1935 1936 bool is_par = (SharedHeap::heap()->n_par_threads() > 0);
1936 1937 if (is_par) _indexedFreeListParLocks[rem_size]->lock();
1937 1938 assert(!is_par ||
1938 1939 (SharedHeap::heap()->n_par_threads() ==
1939 1940 SharedHeap::heap()->workers()->active_workers()), "Mismatch");
1940 1941 returnChunkToFreeList(ffc);
1941 1942 split(size, rem_size);
1942 1943 if (is_par) _indexedFreeListParLocks[rem_size]->unlock();
1943 1944 } else {
1944 1945 returnChunkToDictionary(ffc);
1945 1946 split(size, rem_size);
1946 1947 }
1947 1948 chunk->set_size(new_size);
1948 1949 return chunk;
1949 1950 }
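// Worked example (hypothetical sizes): splitting a 100-word free chunk at
// new_size = 60 places the 40-word remainder immediately after the first 60
// words, updates the BOT at that boundary, and records one split-death of 100
// plus split-births of 60 and 40 via split() below; the caller gets back the
// original address, now describing a 60-word chunk.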
1950 1951
1951 1952 void
1952 1953 CompactibleFreeListSpace::sweep_completed() {
1953 1954 // Now that space is probably plentiful, refill linear
1954 1955 // allocation blocks as needed.
1955 1956 refillLinearAllocBlocksIfNeeded();
1956 1957 }
1957 1958
1958 1959 void
1959 1960 CompactibleFreeListSpace::gc_prologue() {
1960 1961 assert_locked();
1961 1962 if (PrintFLSStatistics != 0) {
1962 1963 gclog_or_tty->print("Before GC:\n");
1963 1964 reportFreeListStatistics();
1964 1965 }
1965 1966 refillLinearAllocBlocksIfNeeded();
1966 1967 }
1967 1968
1968 1969 void
1969 1970 CompactibleFreeListSpace::gc_epilogue() {
1970 1971 assert_locked();
1971 1972 if (PrintGCDetails && Verbose && !_adaptive_freelists) {
1972 1973 if (_smallLinearAllocBlock._word_size == 0)
1973 1974 warning("CompactibleFreeListSpace(epilogue):: Linear allocation failure");
1974 1975 }
1975 1976 assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
1976 1977 _promoInfo.stopTrackingPromotions();
1977 1978 repairLinearAllocationBlocks();
1978 1979 // Print Space's stats
1979 1980 if (PrintFLSStatistics != 0) {
1980 1981 gclog_or_tty->print("After GC:\n");
1981 1982 reportFreeListStatistics();
1982 1983 }
1983 1984 }
1984 1985
1985 1986 // Iteration support, mostly delegated from a CMS generation
1986 1987
1987 1988 void CompactibleFreeListSpace::save_marks() {
1988 1989 assert(Thread::current()->is_VM_thread(),
1989 1990 "Global variable should only be set when single-threaded");
1990 1991 // Mark the "end" of the used space at the time of this call;
1991 1992 // note, however, that promoted objects from this point
1992 1993 // on are tracked in the _promoInfo below.
1993 1994 set_saved_mark_word(unallocated_block());
1994 1995 #ifdef ASSERT
1995 1996 // Check the sanity of save_marks() etc.
1996 1997 MemRegion ur = used_region();
1997 1998 MemRegion urasm = used_region_at_save_marks();
1998 1999 assert(ur.contains(urasm),
1999 2000 err_msg(" Error at save_marks(): [" PTR_FORMAT "," PTR_FORMAT ")"
2000 2001 " should contain [" PTR_FORMAT "," PTR_FORMAT ")",
2001 2002 ur.start(), ur.end(), urasm.start(), urasm.end()));
2002 2003 #endif
2003 2004 // inform allocator that promotions should be tracked.
2004 2005 assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
2005 2006 _promoInfo.startTrackingPromotions();
2006 2007 }
2007 2008
2008 2009 bool CompactibleFreeListSpace::no_allocs_since_save_marks() {
2009 2010 assert(_promoInfo.tracking(), "No preceding save_marks?");
2010 2011 assert(SharedHeap::heap()->n_par_threads() == 0,
2011 2012 "Shouldn't be called if using parallel gc.");
2012 2013 return _promoInfo.noPromotions();
2013 2014 }
2014 2015
2015 2016 #define CFLS_OOP_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix) \
2016 2017 \
2017 2018 void CompactibleFreeListSpace:: \
2018 2019 oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) { \
2019 2020 assert(SharedHeap::heap()->n_par_threads() == 0, \
2020 2021 "Shouldn't be called (yet) during parallel part of gc."); \
2021 2022 _promoInfo.promoted_oops_iterate##nv_suffix(blk); \
2022 2023 /* \
2023 2024 * This also restores any displaced headers and removes the elements from \
2024 2025 * the iteration set as they are processed, so that we have a clean slate \
2025 2026 * at the end of the iteration. Note, thus, that if new objects are \
2026 2027 * promoted as a result of the iteration they are iterated over as well. \
2027 2028 */ \
2028 2029 assert(_promoInfo.noPromotions(), "_promoInfo inconsistency"); \
2029 2030 }
2030 2031
2031 2032 ALL_SINCE_SAVE_MARKS_CLOSURES(CFLS_OOP_SINCE_SAVE_MARKS_DEFN)
2032 2033
2033 2034
2034 2035 void CompactibleFreeListSpace::object_iterate_since_last_GC(ObjectClosure* cl) {
2035 2036 // ugghh... how would one do this efficiently for a non-contiguous space?
2036 2037 guarantee(false, "NYI");
2037 2038 }
2038 2039
2039 2040 bool CompactibleFreeListSpace::linearAllocationWouldFail() const {
2040 2041 return _smallLinearAllocBlock._word_size == 0;
2041 2042 }
2042 2043
2043 2044 void CompactibleFreeListSpace::repairLinearAllocationBlocks() {
2044 2045 // Fix up linear allocation blocks to look like free blocks
2045 2046 repairLinearAllocBlock(&_smallLinearAllocBlock);
2046 2047 }
2047 2048
2048 2049 void CompactibleFreeListSpace::repairLinearAllocBlock(LinearAllocBlock* blk) {
2049 2050 assert_locked();
2050 2051 if (blk->_ptr != NULL) {
2051 2052 assert(blk->_word_size != 0 && blk->_word_size >= MinChunkSize,
2052 2053 "Minimum block size requirement");
2053 2054 FreeChunk* fc = (FreeChunk*)(blk->_ptr);
2054 2055 fc->set_size(blk->_word_size);
2055 2056 fc->link_prev(NULL); // mark as free
2056 2057 fc->dontCoalesce();
2057 2058 assert(fc->is_free(), "just marked it free");
2058 2059 assert(fc->cantCoalesce(), "just marked it uncoalescable");
2059 2060 }
2060 2061 }
2061 2062
2062 2063 void CompactibleFreeListSpace::refillLinearAllocBlocksIfNeeded() {
2063 2064 assert_locked();
2064 2065 if (_smallLinearAllocBlock._ptr == NULL) {
2065 2066 assert(_smallLinearAllocBlock._word_size == 0,
2066 2067 "Size of linAB should be zero if the ptr is NULL");
2067 2068 // Reset the linAB refill and allocation size limit.
2068 2069 _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc, SmallForLinearAlloc);
2069 2070 }
2070 2071 refillLinearAllocBlockIfNeeded(&_smallLinearAllocBlock);
2071 2072 }
2072 2073
2073 2074 void
2074 2075 CompactibleFreeListSpace::refillLinearAllocBlockIfNeeded(LinearAllocBlock* blk) {
2075 2076 assert_locked();
2076 2077 assert((blk->_ptr == NULL && blk->_word_size == 0) ||
2077 2078 (blk->_ptr != NULL && blk->_word_size >= MinChunkSize),
2078 2079 "blk invariant");
2079 2080 if (blk->_ptr == NULL) {
2080 2081 refillLinearAllocBlock(blk);
2081 2082 }
2082 2083 if (PrintMiscellaneous && Verbose) {
2083 2084 if (blk->_word_size == 0) {
2084 2085 warning("CompactibleFreeListSpace(prologue):: Linear allocation failure");
2085 2086 }
2086 2087 }
2087 2088 }
2088 2089
2089 2090 void
2090 2091 CompactibleFreeListSpace::refillLinearAllocBlock(LinearAllocBlock* blk) {
2091 2092 assert_locked();
2092 2093 assert(blk->_word_size == 0 && blk->_ptr == NULL,
2093 2094 "linear allocation block should be empty");
2094 2095 FreeChunk* fc;
2095 2096 if (blk->_refillSize < SmallForDictionary &&
2096 2097 (fc = getChunkFromIndexedFreeList(blk->_refillSize)) != NULL) {
2097 2098 // A linAB's strategy might be to use small sizes to reduce
2098 2099 // fragmentation but still get the benefits of allocation from a
2099 2100 // linAB.
2100 2101 } else {
2101 2102 fc = getChunkFromDictionary(blk->_refillSize);
2102 2103 }
2103 2104 if (fc != NULL) {
2104 2105 blk->_ptr = (HeapWord*)fc;
2105 2106 blk->_word_size = fc->size();
2106 2107 fc->dontCoalesce(); // to prevent sweeper from sweeping us up
2107 2108 }
2108 2109 }
2109 2110
2110 2111 // Support for concurrent collection policy decisions.
2111 2112 bool CompactibleFreeListSpace::should_concurrent_collect() const {
2112 2113 // In the future we might want to add in fragmentation stats --
2113 2114 // including erosion of the "mountain" into this decision as well.
2114 2115 return !adaptive_freelists() && linearAllocationWouldFail();
2115 2116 }
2116 2117
2117 2118 // Support for compaction
2118 2119
2119 2120 void CompactibleFreeListSpace::prepare_for_compaction(CompactPoint* cp) {
2120 2121 SCAN_AND_FORWARD(cp,end,block_is_obj,block_size);
2121 2122 // prepare_for_compaction() uses the space between live objects
2122 2123 // so that later phase can skip dead space quickly. So verification
2123 2124 // of the free lists doesn't work after.
2124 2125 }
2125 2126
2126 2127 #define obj_size(q) adjustObjectSize(oop(q)->size())
2127 2128 #define adjust_obj_size(s) adjustObjectSize(s)
2128 2129
2129 2130 void CompactibleFreeListSpace::adjust_pointers() {
2130 2131 // In other versions of adjust_pointers(), a bail out
2131 2132 // based on the amount of live data in the generation
2132 2133 // (i.e., if 0, bail out) may be used.
2133 2134 // Cannot test used() == 0 here because the free lists have already
2134 2135 // been mangled by the compaction.
2135 2136
2136 2137 SCAN_AND_ADJUST_POINTERS(adjust_obj_size);
2137 2138 // See note about verification in prepare_for_compaction().
2138 2139 }
2139 2140
2140 2141 void CompactibleFreeListSpace::compact() {
2141 2142 SCAN_AND_COMPACT(obj_size);
2142 2143 }
2143 2144
2144 2145 // fragmentation_metric = 1 - [sum of (fbs**2) / (sum of fbs)**2]
2145 2146 // where fbs is free block sizes
2146 2147 double CompactibleFreeListSpace::flsFrag() const {
2147 2148 size_t itabFree = totalSizeInIndexedFreeLists();
2148 2149 double frag = 0.0;
2149 2150 size_t i;
2150 2151
2151 2152 for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2152 2153 double sz = i;
2153 2154 frag += _indexedFreeList[i].count() * (sz * sz);
2154 2155 }
2155 2156
2156 2157 double totFree = itabFree +
2157 2158 _dictionary->total_chunk_size(DEBUG_ONLY(freelistLock()));
2158 2159 if (totFree > 0) {
2159 2160 frag = ((frag + _dictionary->sum_of_squared_block_sizes()) /
2160 2161 (totFree * totFree));
2161 2162 frag = (double)1.0 - frag;
2162 2163 } else {
2163 2164 assert(frag == 0.0, "Follows from totFree == 0");
2164 2165 }
2165 2166 return frag;
2166 2167 }
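// Worked example of the metric: a single free block of 400 words gives
// 1 - (400*400) / (400*400) = 0.0 (no fragmentation), while one hundred
// 4-word blocks of the same total free space give
// 1 - (100 * 16) / (400*400) = 0.99, so the metric approaches 1.0 as the
// free space shatters into small pieces.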
2167 2168
2168 2169 void CompactibleFreeListSpace::beginSweepFLCensus(
2169 2170 float inter_sweep_current,
2170 2171 float inter_sweep_estimate,
2171 2172 float intra_sweep_estimate) {
2172 2173 assert_locked();
2173 2174 size_t i;
2174 2175 for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2175 2176 FreeList<FreeChunk>* fl = &_indexedFreeList[i];
2176 2177 if (PrintFLSStatistics > 1) {
2177 2178 gclog_or_tty->print("size[%d] : ", i);
2178 2179 }
2179 2180 fl->compute_desired(inter_sweep_current, inter_sweep_estimate, intra_sweep_estimate);
2180 2181 fl->set_coal_desired((ssize_t)((double)fl->desired() * CMSSmallCoalSurplusPercent));
2181 2182 fl->set_before_sweep(fl->count());
2182 2183 fl->set_bfr_surp(fl->surplus());
2183 2184 }
2184 2185 _dictionary->begin_sweep_dict_census(CMSLargeCoalSurplusPercent,
2185 2186 inter_sweep_current,
2186 2187 inter_sweep_estimate,
2187 2188 intra_sweep_estimate);
2188 2189 }
2189 2190
2190 2191 void CompactibleFreeListSpace::setFLSurplus() {
2191 2192 assert_locked();
2192 2193 size_t i;
2193 2194 for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2194 2195 FreeList<FreeChunk> *fl = &_indexedFreeList[i];
2195 2196 fl->set_surplus(fl->count() -
2196 2197 (ssize_t)((double)fl->desired() * CMSSmallSplitSurplusPercent));
2197 2198 }
2198 2199 }
2199 2200
2200 2201 void CompactibleFreeListSpace::setFLHints() {
2201 2202 assert_locked();
2202 2203 size_t i;
2203 2204 size_t h = IndexSetSize;
2204 2205 for (i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
2205 2206 FreeList<FreeChunk> *fl = &_indexedFreeList[i];
2206 2207 fl->set_hint(h);
2207 2208 if (fl->surplus() > 0) {
2208 2209 h = i;
2209 2210 }
2210 2211 }
2211 2212 }
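// The downward pass above leaves each list's hint pointing at the smallest
// strictly larger size that currently has a surplus, or at IndexSetSize if
// there is none. For illustration (ignoring IndexSetStride): if only sizes 40
// and 200 have a surplus, sizes below 40 get hint 40, sizes 40 through 199
// get hint 200, and sizes 200 and above get IndexSetSize.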
2212 2213
2213 2214 void CompactibleFreeListSpace::clearFLCensus() {
2214 2215 assert_locked();
2215 2216 size_t i;
2216 2217 for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2217 2218 FreeList<FreeChunk> *fl = &_indexedFreeList[i];
2218 2219 fl->set_prev_sweep(fl->count());
2219 2220 fl->set_coal_births(0);
2220 2221 fl->set_coal_deaths(0);
2221 2222 fl->set_split_births(0);
2222 2223 fl->set_split_deaths(0);
2223 2224 }
2224 2225 }
2225 2226
2226 2227 void CompactibleFreeListSpace::endSweepFLCensus(size_t sweep_count) {
2227 2228 if (PrintFLSStatistics > 0) {
2228 2229 HeapWord* largestAddr = (HeapWord*) dictionary()->find_largest_dict();
2229 2230 gclog_or_tty->print_cr("CMS: Large block " PTR_FORMAT,
2230 2231 largestAddr);
2231 2232 }
2232 2233 setFLSurplus();
2233 2234 setFLHints();
2234 2235 if (PrintGC && PrintFLSCensus > 0) {
2235 2236 printFLCensus(sweep_count);
2236 2237 }
2237 2238 clearFLCensus();
2238 2239 assert_locked();
2239 2240 _dictionary->end_sweep_dict_census(CMSLargeSplitSurplusPercent);
2240 2241 }
2241 2242
2242 2243 bool CompactibleFreeListSpace::coalOverPopulated(size_t size) {
2243 2244 if (size < SmallForDictionary) {
2244 2245 FreeList<FreeChunk> *fl = &_indexedFreeList[size];
2245 2246 return (fl->coal_desired() < 0) ||
2246 2247 ((int)fl->count() > fl->coal_desired());
2247 2248 } else {
2248 2249 return dictionary()->coal_dict_over_populated(size);
2249 2250 }
2250 2251 }
2251 2252
2252 2253 void CompactibleFreeListSpace::smallCoalBirth(size_t size) {
2253 2254 assert(size < SmallForDictionary, "Size too large for indexed list");
2254 2255 FreeList<FreeChunk> *fl = &_indexedFreeList[size];
2255 2256 fl->increment_coal_births();
2256 2257 fl->increment_surplus();
2257 2258 }
2258 2259
2259 2260 void CompactibleFreeListSpace::smallCoalDeath(size_t size) {
2260 2261 assert(size < SmallForDictionary, "Size too large for indexed list");
2261 2262 FreeList<FreeChunk> *fl = &_indexedFreeList[size];
2262 2263 fl->increment_coal_deaths();
2263 2264 fl->decrement_surplus();
2264 2265 }
2265 2266
2266 2267 void CompactibleFreeListSpace::coalBirth(size_t size) {
2267 2268 if (size < SmallForDictionary) {
2268 2269 smallCoalBirth(size);
2269 2270 } else {
2270 2271 dictionary()->dict_census_udpate(size,
2271 2272 false /* split */,
2272 2273 true /* birth */);
2273 2274 }
2274 2275 }
2275 2276
2276 2277 void CompactibleFreeListSpace::coalDeath(size_t size) {
2277 2278 if (size < SmallForDictionary) {
2278 2279 smallCoalDeath(size);
2279 2280 } else {
2280 2281 dictionary()->dict_census_udpate(size,
2281 2282 false /* split */,
2282 2283 false /* birth */);
2283 2284 }
2284 2285 }
2285 2286
2286 2287 void CompactibleFreeListSpace::smallSplitBirth(size_t size) {
2287 2288 assert(size < SmallForDictionary, "Size too large for indexed list");
2288 2289 FreeList<FreeChunk> *fl = &_indexedFreeList[size];
2289 2290 fl->increment_split_births();
2290 2291 fl->increment_surplus();
2291 2292 }
2292 2293
2293 2294 void CompactibleFreeListSpace::smallSplitDeath(size_t size) {
2294 2295 assert(size < SmallForDictionary, "Size too large for indexed list");
2295 2296 FreeList<FreeChunk> *fl = &_indexedFreeList[size];
2296 2297 fl->increment_split_deaths();
2297 2298 fl->decrement_surplus();
2298 2299 }
2299 2300
2300 2301 void CompactibleFreeListSpace::split_birth(size_t size) {
2301 2302 if (size < SmallForDictionary) {
2302 2303 smallSplitBirth(size);
2303 2304 } else {
2304 2305 dictionary()->dict_census_udpate(size,
2305 2306 true /* split */,
2306 2307 true /* birth */);
2307 2308 }
2308 2309 }
2309 2310
2310 2311 void CompactibleFreeListSpace::splitDeath(size_t size) {
2311 2312 if (size < SmallForDictionary) {
2312 2313 smallSplitDeath(size);
2313 2314 } else {
2314 2315 dictionary()->dict_census_udpate(size,
2315 2316 true /* split */,
2316 2317 false /* birth */);
2317 2318 }
2318 2319 }
2319 2320
2320 2321 void CompactibleFreeListSpace::split(size_t from, size_t to1) {
2321 2322 size_t to2 = from - to1;
2322 2323 splitDeath(from);
2323 2324 split_birth(to1);
2324 2325 split_birth(to2);
2325 2326 }
2326 2327
2327 2328 void CompactibleFreeListSpace::print() const {
2328 2329 print_on(tty);
2329 2330 }
2330 2331
2331 2332 void CompactibleFreeListSpace::prepare_for_verify() {
2332 2333 assert_locked();
2333 2334 repairLinearAllocationBlocks();
2334 2335 // Verify that the SpoolBlocks look like free blocks of
2335 2336 // appropriate sizes... To be done ...
2336 2337 }
2337 2338
2338 2339 class VerifyAllBlksClosure: public BlkClosure {
2339 2340 private:
2340 2341 const CompactibleFreeListSpace* _sp;
2341 2342 const MemRegion _span;
2342 2343 HeapWord* _last_addr;
2343 2344 size_t _last_size;
2344 2345 bool _last_was_obj;
2345 2346 bool _last_was_live;
2346 2347
2347 2348 public:
2348 2349 VerifyAllBlksClosure(const CompactibleFreeListSpace* sp,
2349 2350 MemRegion span) : _sp(sp), _span(span),
2350 2351 _last_addr(NULL), _last_size(0),
2351 2352 _last_was_obj(false), _last_was_live(false) { }
2352 2353
2353 2354 virtual size_t do_blk(HeapWord* addr) {
2354 2355 size_t res;
2355 2356 bool was_obj = false;
2356 2357 bool was_live = false;
2357 2358 if (_sp->block_is_obj(addr)) {
2358 2359 was_obj = true;
2359 2360 oop p = oop(addr);
2360 2361 guarantee(p->is_oop(), "Should be an oop");
2361 2362 res = _sp->adjustObjectSize(p->size());
2362 2363 if (_sp->obj_is_alive(addr)) {
2363 2364 was_live = true;
2364 2365 p->verify();
2365 2366 }
2366 2367 } else {
2367 2368 FreeChunk* fc = (FreeChunk*)addr;
2368 2369 res = fc->size();
2369 2370 if (FLSVerifyLists && !fc->cantCoalesce()) {
2370 2371 guarantee(_sp->verify_chunk_in_free_list(fc),
2371 2372 "Chunk should be on a free list");
2372 2373 }
2373 2374 }
2374 2375 if (res == 0) {
2375 2376 gclog_or_tty->print_cr("Livelock: no rank reduction!");
2376 2377 gclog_or_tty->print_cr(
2377 2378 " Current: addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n"
2378 2379 " Previous: addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n",
2379 2380 addr, res, was_obj ?"true":"false", was_live ?"true":"false",
2380 2381 _last_addr, _last_size, _last_was_obj?"true":"false", _last_was_live?"true":"false");
2381 2382 _sp->print_on(gclog_or_tty);
2382 2383 guarantee(false, "Seppuku!");
2383 2384 }
2384 2385 _last_addr = addr;
2385 2386 _last_size = res;
2386 2387 _last_was_obj = was_obj;
2387 2388 _last_was_live = was_live;
2388 2389 return res;
2389 2390 }
2390 2391 };
2391 2392
2392 2393 class VerifyAllOopsClosure: public OopClosure {
2393 2394 private:
2394 2395 const CMSCollector* _collector;
2395 2396 const CompactibleFreeListSpace* _sp;
2396 2397 const MemRegion _span;
2397 2398 const bool _past_remark;
2398 2399 const CMSBitMap* _bit_map;
2399 2400
2400 2401 protected:
2401 2402 void do_oop(void* p, oop obj) {
2402 2403 if (_span.contains(obj)) { // the interior oop points into CMS heap
2403 2404 if (!_span.contains(p)) { // reference from outside CMS heap
2404 2405 // Should be a valid object; the first disjunct below allows
2405 2406 // us to sidestep an assertion in block_is_obj() that insists
2406 2407 // that p be in _sp. Note that several generations (and spaces)
2407 2408 // are spanned by _span (CMS heap) above.
2408 2409 guarantee(!_sp->is_in_reserved(obj) ||
2409 2410 _sp->block_is_obj((HeapWord*)obj),
2410 2411 "Should be an object");
2411 2412 guarantee(obj->is_oop(), "Should be an oop");
2412 2413 obj->verify();
2413 2414 if (_past_remark) {
2414 2415 // Remark has been completed, the object should be marked
2415 2416 guarantee(_bit_map->isMarked((HeapWord*)obj), "Should be marked");
2416 2417 }
2417 2418 } else { // reference within CMS heap
2418 2419 if (_past_remark) {
2419 2420 // Remark has been completed -- so the referent should have
2420 2421 // been marked, if referring object is.
2421 2422 if (_bit_map->isMarked(_collector->block_start(p))) {
2422 2423 guarantee(_bit_map->isMarked((HeapWord*)obj), "Marking error?");
2423 2424 }
2424 2425 }
2425 2426 }
2426 2427 } else if (_sp->is_in_reserved(p)) {
2427 2428 // the reference is from FLS, and points out of FLS
2428 2429 guarantee(obj->is_oop(), "Should be an oop");
2429 2430 obj->verify();
2430 2431 }
2431 2432 }
2432 2433
2433 2434 template <class T> void do_oop_work(T* p) {
2434 2435 T heap_oop = oopDesc::load_heap_oop(p);
2435 2436 if (!oopDesc::is_null(heap_oop)) {
2436 2437 oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
2437 2438 do_oop(p, obj);
2438 2439 }
2439 2440 }
2440 2441
2441 2442 public:
2442 2443 VerifyAllOopsClosure(const CMSCollector* collector,
2443 2444 const CompactibleFreeListSpace* sp, MemRegion span,
2444 2445 bool past_remark, CMSBitMap* bit_map) :
2445 2446 OopClosure(), _collector(collector), _sp(sp), _span(span),
2446 2447 _past_remark(past_remark), _bit_map(bit_map) { }
2447 2448
2448 2449 virtual void do_oop(oop* p) { VerifyAllOopsClosure::do_oop_work(p); }
2449 2450 virtual void do_oop(narrowOop* p) { VerifyAllOopsClosure::do_oop_work(p); }
2450 2451 };
2451 2452
2452 2453 void CompactibleFreeListSpace::verify() const {
2453 2454 assert_lock_strong(&_freelistLock);
2454 2455 verify_objects_initialized();
2455 2456 MemRegion span = _collector->_span;
2456 2457 bool past_remark = (_collector->abstract_state() ==
2457 2458 CMSCollector::Sweeping);
2458 2459
2459 2460 ResourceMark rm;
2460 2461 HandleMark hm;
2461 2462
2462 2463 // Check integrity of CFL data structures
2463 2464 _promoInfo.verify();
2464 2465 _dictionary->verify();
2465 2466 if (FLSVerifyIndexTable) {
2466 2467 verifyIndexedFreeLists();
2467 2468 }
2468 2469 // Check integrity of all objects and free blocks in space
2469 2470 {
2470 2471 VerifyAllBlksClosure cl(this, span);
2471 2472 ((CompactibleFreeListSpace*)this)->blk_iterate(&cl); // cast off const
2472 2473 }
2473 2474 // Check that all references in the heap to FLS
2474 2475 // are to valid objects in FLS or that references in
2475 2476 // FLS are to valid objects elsewhere in the heap
2476 2477 if (FLSVerifyAllHeapReferences)
2477 2478 {
2478 2479 VerifyAllOopsClosure cl(_collector, this, span, past_remark,
2479 2480 _collector->markBitMap());
2480 2481 CollectedHeap* ch = Universe::heap();
2481 2482 ch->oop_iterate(&cl); // all oops in generations
2482 2483 ch->permanent_oop_iterate(&cl); // all oops in perm gen
2483 2484 }
2484 2485
2485 2486 if (VerifyObjectStartArray) {
2486 2487 // Verify the block offset table
2487 2488 _bt.verify();
2488 2489 }
2489 2490 }
2490 2491
2491 2492 #ifndef PRODUCT
2492 2493 void CompactibleFreeListSpace::verifyFreeLists() const {
2493 2494 if (FLSVerifyLists) {
2494 2495 _dictionary->verify();
2495 2496 verifyIndexedFreeLists();
2496 2497 } else {
2497 2498 if (FLSVerifyDictionary) {
2498 2499 _dictionary->verify();
2499 2500 }
2500 2501 if (FLSVerifyIndexTable) {
2501 2502 verifyIndexedFreeLists();
2502 2503 }
2503 2504 }
2504 2505 }
2505 2506 #endif
2506 2507
2507 2508 void CompactibleFreeListSpace::verifyIndexedFreeLists() const {
2508 2509 size_t i = 0;
2509 2510 for (; i < IndexSetStart; i++) {
2510 2511 guarantee(_indexedFreeList[i].head() == NULL, "should be NULL");
2511 2512 }
2512 2513 for (; i < IndexSetSize; i++) {
2513 2514 verifyIndexedFreeList(i);
2514 2515 }
2515 2516 }
2516 2517
2517 2518 void CompactibleFreeListSpace::verifyIndexedFreeList(size_t size) const {
2518 2519 FreeChunk* fc = _indexedFreeList[size].head();
2519 2520 FreeChunk* tail = _indexedFreeList[size].tail();
2520 2521 size_t num = _indexedFreeList[size].count();
2521 2522 size_t n = 0;
2522 2523 guarantee(((size >= IndexSetStart) && (size % IndexSetStride == 0)) || fc == NULL,
2523 2524 "Slot should have been empty");
2524 2525 for (; fc != NULL; fc = fc->next(), n++) {
2525 2526 guarantee(fc->size() == size, "Size inconsistency");
2526 2527 guarantee(fc->is_free(), "!free?");
2527 2528 guarantee(fc->next() == NULL || fc->next()->prev() == fc, "Broken list");
2528 2529 guarantee((fc->next() == NULL) == (fc == tail), "Incorrect tail");
2529 2530 }
2530 2531 guarantee(n == num, "Incorrect count");
2531 2532 }
2532 2533
2533 2534 #ifndef PRODUCT
2534 2535 void CompactibleFreeListSpace::check_free_list_consistency() const {
2535 2536 assert(_dictionary->min_size() <= IndexSetSize,
2536 2537 "Some sizes can't be allocated without recourse to"
2537 2538 " linear allocation buffers");
2538 2539 assert(BinaryTreeDictionary<FreeChunk>::min_tree_chunk_size*HeapWordSize == sizeof(TreeChunk<FreeChunk>),
2539 2540 "else MIN_TREE_CHUNK_SIZE is wrong");
2540 2541 assert(IndexSetStart != 0, "IndexSetStart not initialized");
2541 2542 assert(IndexSetStride != 0, "IndexSetStride not initialized");
2542 2543 }
2543 2544 #endif
2544 2545
2545 2546 void CompactibleFreeListSpace::printFLCensus(size_t sweep_count) const {
2546 2547 assert_lock_strong(&_freelistLock);
2547 2548 FreeList<FreeChunk> total;
2548 2549 gclog_or_tty->print("end sweep# " SIZE_FORMAT "\n", sweep_count);
2549 2550 FreeList<FreeChunk>::print_labels_on(gclog_or_tty, "size");
2550 2551 size_t total_free = 0;
2551 2552 for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2552 2553 const FreeList<FreeChunk> *fl = &_indexedFreeList[i];
2553 2554 total_free += fl->count() * fl->size();
2554 2555 if (i % (40*IndexSetStride) == 0) {
2555 2556 FreeList<FreeChunk>::print_labels_on(gclog_or_tty, "size");
2556 2557 }
2557 2558 fl->print_on(gclog_or_tty);
2558 2559 total.set_bfr_surp( total.bfr_surp() + fl->bfr_surp() );
2559 2560 total.set_surplus( total.surplus() + fl->surplus() );
2560 2561 total.set_desired( total.desired() + fl->desired() );
2561 2562 total.set_prev_sweep( total.prev_sweep() + fl->prev_sweep() );
2562 2563 total.set_before_sweep(total.before_sweep() + fl->before_sweep());
2563 2564 total.set_count( total.count() + fl->count() );
2564 2565 total.set_coal_births( total.coal_births() + fl->coal_births() );
2565 2566 total.set_coal_deaths( total.coal_deaths() + fl->coal_deaths() );
2566 2567 total.set_split_births(total.split_births() + fl->split_births());
2567 2568 total.set_split_deaths(total.split_deaths() + fl->split_deaths());
2568 2569 }
2569 2570 total.print_on(gclog_or_tty, "TOTAL");
2570 2571 gclog_or_tty->print_cr("Total free in indexed lists "
2571 2572 SIZE_FORMAT " words", total_free);
2572 2573 gclog_or_tty->print("growth: %8.5f deficit: %8.5f\n",
2573 2574 (double)(total.split_births()+total.coal_births()-total.split_deaths()-total.coal_deaths())/
2574 2575 (total.prev_sweep() != 0 ? (double)total.prev_sweep() : 1.0),
2575 2576 (double)(total.desired() - total.count())/(total.desired() != 0 ? (double)total.desired() : 1.0));
2576 2577 _dictionary->print_dict_census();
2577 2578 }
2578 2579
2579 2580 ///////////////////////////////////////////////////////////////////////////
2580 2581 // CFLS_LAB
2581 2582 ///////////////////////////////////////////////////////////////////////////
2582 2583
2583 2584 #define VECTOR_257(x) \
2584 2585 /* 1 2 3 4 5 6 7 8 9 1x 11 12 13 14 15 16 17 18 19 2x 21 22 23 24 25 26 27 28 29 3x 31 32 */ \
2585 2586 { x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \
2586 2587 x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \
2587 2588 x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \
2588 2589 x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \
2589 2590 x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \
2590 2591 x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \
2591 2592 x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \
2592 2593 x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \
2593 2594 x }
2594 2595
2595 2596 // Initialize with default setting of CMSParPromoteBlocksToClaim, _not_
2596 2597 // OldPLABSize, whose static default is different; if overridden at the
2597 2598 // command-line, this will get reinitialized via a call to
2598 2599 // modify_initialization() below.
2599 2600 AdaptiveWeightedAverage CFLS_LAB::_blocks_to_claim[] =
2600 2601 VECTOR_257(AdaptiveWeightedAverage(OldPLABWeight, (float)CMSParPromoteBlocksToClaim));
2601 2602 size_t CFLS_LAB::_global_num_blocks[] = VECTOR_257(0);
2602 2603 uint CFLS_LAB::_global_num_workers[] = VECTOR_257(0);
2603 2604
2604 2605 CFLS_LAB::CFLS_LAB(CompactibleFreeListSpace* cfls) :
2605 2606 _cfls(cfls)
2606 2607 {
2607 2608 assert(CompactibleFreeListSpace::IndexSetSize == 257, "Modify VECTOR_257() macro above");
2608 2609 for (size_t i = CompactibleFreeListSpace::IndexSetStart;
2609 2610 i < CompactibleFreeListSpace::IndexSetSize;
2610 2611 i += CompactibleFreeListSpace::IndexSetStride) {
2611 2612 _indexedFreeList[i].set_size(i);
2612 2613 _num_blocks[i] = 0;
2613 2614 }
2614 2615 }
2615 2616
2616 2617 static bool _CFLS_LAB_modified = false;
2617 2618
2618 2619 void CFLS_LAB::modify_initialization(size_t n, unsigned wt) {
2619 2620 assert(!_CFLS_LAB_modified, "Call only once");
2620 2621 _CFLS_LAB_modified = true;
2621 2622 for (size_t i = CompactibleFreeListSpace::IndexSetStart;
2622 2623 i < CompactibleFreeListSpace::IndexSetSize;
2623 2624 i += CompactibleFreeListSpace::IndexSetStride) {
2624 2625 _blocks_to_claim[i].modify(n, wt, true /* force */);
2625 2626 }
2626 2627 }
2627 2628
2628 2629 HeapWord* CFLS_LAB::alloc(size_t word_sz) {
2629 2630 FreeChunk* res;
2630 2631 assert(word_sz == _cfls->adjustObjectSize(word_sz), "Error");
2631 2632 if (word_sz >= CompactibleFreeListSpace::IndexSetSize) {
2632 2633 // This locking manages sync with other large object allocations.
2633 2634 MutexLockerEx x(_cfls->parDictionaryAllocLock(),
2634 2635 Mutex::_no_safepoint_check_flag);
2635 2636 res = _cfls->getChunkFromDictionaryExact(word_sz);
2636 2637 if (res == NULL) return NULL;
2637 2638 } else {
2638 2639 FreeList<FreeChunk>* fl = &_indexedFreeList[word_sz];
2639 2640 if (fl->count() == 0) {
2640 2641 // Attempt to refill this local free list.
2641 2642 get_from_global_pool(word_sz, fl);
2642 2643 // If it didn't work, give up.
2643 2644 if (fl->count() == 0) return NULL;
2644 2645 }
2645 2646 res = fl->get_chunk_at_head();
2646 2647 assert(res != NULL, "Why was count non-zero?");
2647 2648 }
2648 2649 res->markNotFree();
2649 2650 assert(!res->is_free(), "shouldn't be marked free");
2650 2651 assert(oop(res)->klass_or_null() == NULL, "should look uninitialized");
2651 2652 // mangle a just allocated object with a distinct pattern.
2652 2653 debug_only(res->mangleAllocated(word_sz));
2653 2654 return (HeapWord*)res;
2654 2655 }
2655 2656
2656 2657 // Get a chunk of blocks of the right size and update related
2657 2658 // book-keeping stats
2658 2659 void CFLS_LAB::get_from_global_pool(size_t word_sz, FreeList<FreeChunk>* fl) {
2659 2660 // Get the #blocks we want to claim
2660 2661 size_t n_blks = (size_t)_blocks_to_claim[word_sz].average();
2661 2662 assert(n_blks > 0, "Error");
2662 2663 assert(ResizePLAB || n_blks == OldPLABSize, "Error");
2663 2664 // In some cases, when the application has a phase change,
2664 2665 // there may be a sudden and sharp shift in the object survival
2665 2666 // profile, and updating the counts at the end of a scavenge
2666 2667 // may not be quick enough, giving rise to large scavenge pauses
2667 2668 // during these phase changes. It is beneficial to detect such
2668 2669 // changes on-the-fly during a scavenge and avoid such a phase-change
2669 2670 // pothole. The following code is a heuristic attempt to do that.
2670 2671 // It is protected by a product flag until we have gained
2671 2672 // enough experience with this heuristic and fine-tuned its behaviour.
2672 2673 // WARNING: This might increase fragmentation if we overreact to
2673 2674 // small spikes, so some kind of historical smoothing based on
2674 2675 // previous experience with the greater reactivity might be useful.
2675 2676 // Lacking sufficient experience, CMSOldPLABResizeQuicker is disabled by
2676 2677 // default.
2677 2678 if (ResizeOldPLAB && CMSOldPLABResizeQuicker) {
2678 2679 size_t multiple = _num_blocks[word_sz]/(CMSOldPLABToleranceFactor*CMSOldPLABNumRefills*n_blks);
2679 2680 n_blks += CMSOldPLABReactivityFactor*multiple*n_blks;
2680 2681 n_blks = MIN2(n_blks, CMSOldPLABMax);
2681 2682 }
2682 2683 assert(n_blks > 0, "Error");
2683 2684 _cfls->par_get_chunk_of_blocks(word_sz, n_blks, fl);
2684 2685 // Update stats table entry for this block size
2685 2686 _num_blocks[word_sz] += fl->count();
2686 2687 }
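// Worked example of the CMSOldPLABResizeQuicker adjustment above, with hypothetical
// flag values rather than the shipped defaults: suppose CMSOldPLABToleranceFactor == 4,
// CMSOldPLABNumRefills == 4, CMSOldPLABReactivityFactor == 2, the smoothed estimate
// yields n_blks == 50, and this worker has already consumed _num_blocks[word_sz] == 2000
// blocks of this size during the current scavenge. Then multiple == 2000/(4*4*50) == 2,
// so the claim is boosted to 50 + 2*2*50 == 250 blocks, subject to the CMSOldPLABMax cap.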
2687 2688
2688 2689 void CFLS_LAB::compute_desired_plab_size() {
2689 2690 for (size_t i = CompactibleFreeListSpace::IndexSetStart;
2690 2691 i < CompactibleFreeListSpace::IndexSetSize;
2691 2692 i += CompactibleFreeListSpace::IndexSetStride) {
2692 2693 assert((_global_num_workers[i] == 0) == (_global_num_blocks[i] == 0),
2693 2694 "Counter inconsistency");
2694 2695 if (_global_num_workers[i] > 0) {
2695 2696 // Need to smooth wrt historical average
2696 2697 if (ResizeOldPLAB) {
2697 2698 _blocks_to_claim[i].sample(
2698 2699 MAX2((size_t)CMSOldPLABMin,
2699 2700 MIN2((size_t)CMSOldPLABMax,
2700 2701 _global_num_blocks[i]/(_global_num_workers[i]*CMSOldPLABNumRefills))));
2701 2702 }
2702 2703 // Reset counters for next round
2703 2704 _global_num_workers[i] = 0;
2704 2705 _global_num_blocks[i] = 0;
2705 2706 if (PrintOldPLAB) {
2706 2707 gclog_or_tty->print_cr("[%d]: %d", i, (size_t)_blocks_to_claim[i].average());
2707 2708 }
2708 2709 }
2709 2710 }
2710 2711 }
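// In effect each indexed size samples
//   clamp(_global_num_blocks[i] / (_global_num_workers[i] * CMSOldPLABNumRefills),
//         CMSOldPLABMin, CMSOldPLABMax)
// into its weighted average. With hypothetical numbers: 1200 blocks consumed by
// 4 workers doing 4 refills each samples 1200/(4*4) == 75 blocks per refill.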
2711 2712
2712 2713 // If this is changed in the future to allow parallel
2713 2714 // access, one would need to take the FL locks and,
2714 2715 // depending on how it is used, stagger access from
2715 2716 // parallel threads to reduce contention.
2716 2717 void CFLS_LAB::retire(int tid) {
2717 2718 // We run this single threaded with the world stopped;
2718 2719 // so no need for locks and such.
2719 2720 NOT_PRODUCT(Thread* t = Thread::current();)
2720 2721 assert(Thread::current()->is_VM_thread(), "Error");
2721 2722 for (size_t i = CompactibleFreeListSpace::IndexSetStart;
2722 2723 i < CompactibleFreeListSpace::IndexSetSize;
2723 2724 i += CompactibleFreeListSpace::IndexSetStride) {
2724 2725 assert(_num_blocks[i] >= (size_t)_indexedFreeList[i].count(),
2725 2726 "Can't retire more than what we obtained");
2726 2727 if (_num_blocks[i] > 0) {
2727 2728 size_t num_retire = _indexedFreeList[i].count();
2728 2729 assert(_num_blocks[i] > num_retire, "Should have used at least one");
2729 2730 {
2730 2731 // MutexLockerEx x(_cfls->_indexedFreeListParLocks[i],
2731 2732 // Mutex::_no_safepoint_check_flag);
2732 2733
2733 2734 // Update globals stats for num_blocks used
2734 2735 _global_num_blocks[i] += (_num_blocks[i] - num_retire);
2735 2736 _global_num_workers[i]++;
2736 2737 assert(_global_num_workers[i] <= ParallelGCThreads, "Too big");
2737 2738 if (num_retire > 0) {
2738 2739 _cfls->_indexedFreeList[i].prepend(&_indexedFreeList[i]);
2739 2740 // Reset this list.
2740 2741 _indexedFreeList[i] = FreeList<FreeChunk>();
2741 2742 _indexedFreeList[i].set_size(i);
2742 2743 }
2743 2744 }
2744 2745 if (PrintOldPLAB) {
2745 2746 gclog_or_tty->print_cr("%d[%d]: %d/%d/%d",
2746 2747 tid, i, num_retire, _num_blocks[i], (size_t)_blocks_to_claim[i].average());
2747 2748 }
2748 2749 // Reset stats for next round
2749 2750 _num_blocks[i] = 0;
2750 2751 }
2751 2752 }
2752 2753 }
2753 2754
2754 2755 void CompactibleFreeListSpace:: par_get_chunk_of_blocks(size_t word_sz, size_t n, FreeList<FreeChunk>* fl) {
2755 2756 assert(fl->count() == 0, "Precondition.");
2756 2757 assert(word_sz < CompactibleFreeListSpace::IndexSetSize,
2757 2758 "Precondition");
2758 2759
2759 2760 // We'll try all multiples of word_sz in the indexed set, starting with
2760 2761 // word_sz itself and, if CMSSplitIndexedFreeListBlocks, try larger multiples,
2761 2762 // then try getting a big chunk and splitting it.
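// Worked example (sizes hypothetical): for word_sz == 8 the loop below visits
// cur_sz == 8, 16, 24, ... while cur_sz < IndexSetSize; chunks taken from the
// cur_sz == 24 list (k == 3) are each split three ways into 8-word blocks, so
// only about n/3 such chunks are needed to yield n blocks.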
2762 2763 {
2763 2764 bool found;
2764 2765 int k;
2765 2766 size_t cur_sz;
2766 2767 for (k = 1, cur_sz = k * word_sz, found = false;
2767 2768 (cur_sz < CompactibleFreeListSpace::IndexSetSize) &&
2768 2769 (CMSSplitIndexedFreeListBlocks || k <= 1);
2769 2770 k++, cur_sz = k * word_sz) {
2770 2771 FreeList<FreeChunk> fl_for_cur_sz; // Empty.
2771 2772 fl_for_cur_sz.set_size(cur_sz);
2772 2773 {
2773 2774 MutexLockerEx x(_indexedFreeListParLocks[cur_sz],
2774 2775 Mutex::_no_safepoint_check_flag);
2775 2776 FreeList<FreeChunk>* gfl = &_indexedFreeList[cur_sz];
2776 2777 if (gfl->count() != 0) {
2777 2778 // nn is the number of chunks of size cur_sz that
2778 2779 // we'd need to split k-ways each, in order to create
2779 2780 // "n" chunks of size word_sz each.
2780 2781 const size_t nn = MAX2(n/k, (size_t)1);
2781 2782 gfl->getFirstNChunksFromList(nn, &fl_for_cur_sz);
2782 2783 found = true;
2783 2784 if (k > 1) {
2784 2785 // Update split death stats for the cur_sz-size blocks list:
2785 2786 // we increment the split death count by the number of blocks
2786 2787 // we just took from the cur_sz-size blocks list and which
2787 2788 // we will be splitting below.
2788 2789 ssize_t deaths = gfl->split_deaths() +
2789 2790 fl_for_cur_sz.count();
2790 2791 gfl->set_split_deaths(deaths);
2791 2792 }
2792 2793 }
2793 2794 }
2794 2795 // Now transfer fl_for_cur_sz to fl. Common case, we hope, is k = 1.
2795 2796 if (found) {
2796 2797 if (k == 1) {
2797 2798 fl->prepend(&fl_for_cur_sz);
2798 2799 } else {
2799 2800 // Divide each block on fl_for_cur_sz up k ways.
2800 2801 FreeChunk* fc;
2801 2802 while ((fc = fl_for_cur_sz.get_chunk_at_head()) != NULL) {
2802 2803 // Must do this in reverse order, so that anybody attempting to
2803 2804 // access the main chunk sees it as a single free block until we
2804 2805 // change it.
2805 2806 size_t fc_size = fc->size();
2806 2807 assert(fc->is_free(), "Error");
2807 2808 for (int i = k-1; i >= 0; i--) {
2808 2809 FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
2809 2810 assert((i != 0) ||
2810 2811 ((fc == ffc) && ffc->is_free() &&
2811 2812 (ffc->size() == k*word_sz) && (fc_size == word_sz)),
2812 2813 "Counting error");
2813 2814 ffc->set_size(word_sz);
2814 2815 ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
2815 2816 ffc->link_next(NULL);
2816 2817 // Above must occur before BOT is updated below.
2817 2818 OrderAccess::storestore();
2818 2819 // splitting from the right, fc_size == i * word_sz
2819 2820 _bt.mark_block((HeapWord*)ffc, word_sz, true /* reducing */);
2820 2821 fc_size -= word_sz;
2821 2822 assert(fc_size == i*word_sz, "Error");
2822 2823 _bt.verify_not_unallocated((HeapWord*)ffc, word_sz);
2823 2824 _bt.verify_single_block((HeapWord*)fc, fc_size);
2824 2825 _bt.verify_single_block((HeapWord*)ffc, word_sz);
2825 2826 // Push this on "fl".
2826 2827 fl->return_chunk_at_head(ffc);
2827 2828 }
2828 2829 // TRAP
2829 2830 assert(fl->tail()->next() == NULL, "List invariant.");
2830 2831 }
2831 2832 }
2832 2833 // Update birth stats for this block size.
2833 2834 size_t num = fl->count();
2834 2835 MutexLockerEx x(_indexedFreeListParLocks[word_sz],
2835 2836 Mutex::_no_safepoint_check_flag);
2836 2837 ssize_t births = _indexedFreeList[word_sz].split_births() + num;
2837 2838 _indexedFreeList[word_sz].set_split_births(births);
2838 2839 return;
2839 2840 }
2840 2841 }
2841 2842 }
2842 2843 // Otherwise, we'll split a block from the dictionary.
2843 2844 FreeChunk* fc = NULL;
2844 2845 FreeChunk* rem_fc = NULL;
2845 2846 size_t rem;
2846 2847 {
2847 2848 MutexLockerEx x(parDictionaryAllocLock(),
2848 2849 Mutex::_no_safepoint_check_flag);
2849 2850 while (n > 0) {
2850 2851 fc = dictionary()->get_chunk(MAX2(n * word_sz,
2851 2852 _dictionary->min_size()),
2852 2853 FreeBlockDictionary<FreeChunk>::atLeast);
2853 2854 if (fc != NULL) {
2854 2855 _bt.allocated((HeapWord*)fc, fc->size(), true /* reducing */); // update _unallocated_blk
2855 2856 dictionary()->dict_census_udpate(fc->size(),
2856 2857 true /*split*/,
2857 2858 false /*birth*/);
2858 2859 break;
2859 2860 } else {
2860 2861 n--;
2861 2862 }
2862 2863 }
2863 2864 if (fc == NULL) return;
2864 2865 // Otherwise, split up that block.
2865 2866 assert((ssize_t)n >= 1, "Control point invariant");
2866 2867 assert(fc->is_free(), "Error: should be a free block");
2867 2868 _bt.verify_single_block((HeapWord*)fc, fc->size());
2868 2869 const size_t nn = fc->size() / word_sz;
2869 2870 n = MIN2(nn, n);
2870 2871 assert((ssize_t)n >= 1, "Control point invariant");
2871 2872 rem = fc->size() - n * word_sz;
2872 2873 // If there is a remainder, and it's too small, allocate one fewer.
2873 2874 if (rem > 0 && rem < MinChunkSize) {
2874 2875 n--; rem += word_sz;
2875 2876 }
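// Worked example (sizes hypothetical): for word_sz == 8, a 50-word dictionary chunk
// yields at most 50/8 == 6 blocks with rem == 50 - 48 == 2; if 2 < MinChunkSize, the
// adjustment above settles on n == 5 blocks and rem == 10, which is large enough to
// hand back as a free chunk below.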
2876 2877 // Note that at this point we may have n == 0.
2877 2878 assert((ssize_t)n >= 0, "Control point invariant");
2878 2879
2879 2880 // If n is 0, the chunk fc that was found is not large
2880 2881 // enough to leave a viable remainder. We are unable to
2881 2882 // allocate even one block. Return fc to the
2882 2883 // dictionary and return, leaving "fl" empty.
2883 2884 if (n == 0) {
2884 2885 returnChunkToDictionary(fc);
2885 2886 assert(fl->count() == 0, "We never allocated any blocks");
2886 2887 return;
2887 2888 }
2888 2889
2889 2890 // First return the remainder, if any.
2890 2891 // Note that we hold the lock until we decide if we're going to give
2891 2892 // back the remainder to the dictionary, since a concurrent allocation
2892 2893 // may otherwise see the heap as empty. (We're willing to take that
2893 2894 // hit if the block is a small block.)
2894 2895 if (rem > 0) {
2895 2896 size_t prefix_size = n * word_sz;
2896 2897 rem_fc = (FreeChunk*)((HeapWord*)fc + prefix_size);
2897 2898 rem_fc->set_size(rem);
2898 2899 rem_fc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
2899 2900 rem_fc->link_next(NULL);
2900 2901 // Above must occur before BOT is updated below.
2901 2902 assert((ssize_t)n > 0 && prefix_size > 0 && rem_fc > fc, "Error");
2902 2903 OrderAccess::storestore();
2903 2904 _bt.split_block((HeapWord*)fc, fc->size(), prefix_size);
2904 2905 assert(fc->is_free(), "Error");
2905 2906 fc->set_size(prefix_size);
2906 2907 if (rem >= IndexSetSize) {
2907 2908 returnChunkToDictionary(rem_fc);
2908 2909 dictionary()->dict_census_udpate(rem, true /*split*/, true /*birth*/);
2909 2910 rem_fc = NULL;
2910 2911 }
2911 2912 // Otherwise, return it to the small list below.
2912 2913 }
2913 2914 }
2914 2915 if (rem_fc != NULL) {
2915 2916 MutexLockerEx x(_indexedFreeListParLocks[rem],
2916 2917 Mutex::_no_safepoint_check_flag);
2917 2918 _bt.verify_not_unallocated((HeapWord*)rem_fc, rem_fc->size());
2918 2919 _indexedFreeList[rem].return_chunk_at_head(rem_fc);
2919 2920 smallSplitBirth(rem);
2920 2921 }
2921 2922 assert((ssize_t)n > 0 && fc != NULL, "Consistency");
2922 2923 // Now do the splitting up.
2923 2924 // Must do this in reverse order, so that anybody attempting to
2924 2925 // access the main chunk sees it as a single free block until we
2925 2926 // change it.
2926 2927 size_t fc_size = n * word_sz;
2927 2928 // All but first chunk in this loop
2928 2929 for (ssize_t i = n-1; i > 0; i--) {
2929 2930 FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
2930 2931 ffc->set_size(word_sz);
2931 2932 ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
2932 2933 ffc->link_next(NULL);
2933 2934 // Above must occur before BOT is updated below.
2934 2935 OrderAccess::storestore();
2935 2936 // splitting from the right, fc_size == (n - i + 1) * wordsize
2936 2937 _bt.mark_block((HeapWord*)ffc, word_sz, true /* reducing */);
2937 2938 fc_size -= word_sz;
2938 2939 _bt.verify_not_unallocated((HeapWord*)ffc, ffc->size());
2939 2940 _bt.verify_single_block((HeapWord*)ffc, ffc->size());
2940 2941 _bt.verify_single_block((HeapWord*)fc, fc_size);
2941 2942 // Push this on "fl".
2942 2943 fl->return_chunk_at_head(ffc);
2943 2944 }
2944 2945 // First chunk
2945 2946 assert(fc->is_free() && fc->size() == n*word_sz, "Error: should still be a free block");
2946 2947 // The blocks above should show their new sizes before the first block below
2947 2948 fc->set_size(word_sz);
2948 2949 fc->link_prev(NULL); // idempotent wrt free-ness, see assert above
2949 2950 fc->link_next(NULL);
2950 2951 _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
2951 2952 _bt.verify_single_block((HeapWord*)fc, fc->size());
2952 2953 fl->return_chunk_at_head(fc);
2953 2954
2954 2955 assert((ssize_t)n > 0 && (ssize_t)n == fl->count(), "Incorrect number of blocks");
2955 2956 {
2956 2957 // Update the stats for this block size.
2957 2958 MutexLockerEx x(_indexedFreeListParLocks[word_sz],
2958 2959 Mutex::_no_safepoint_check_flag);
2959 2960 const ssize_t births = _indexedFreeList[word_sz].split_births() + n;
2960 2961 _indexedFreeList[word_sz].set_split_births(births);
2961 2962 // ssize_t new_surplus = _indexedFreeList[word_sz].surplus() + n;
2962 2963 // _indexedFreeList[word_sz].set_surplus(new_surplus);
2963 2964 }
2964 2965
2965 2966 // TRAP
2966 2967 assert(fl->tail()->next() == NULL, "List invariant.");
2967 2968 }
2968 2969
2969 2970 // Set up the space's par_seq_tasks structure for work claiming
2970 2971 // for parallel rescan. See CMSParRemarkTask where this is currently used.
2971 2972 // XXX Need to suitably abstract and generalize this and the next
2972 2973 // method into one.
2973 2974 void
2974 2975 CompactibleFreeListSpace::
2975 2976 initialize_sequential_subtasks_for_rescan(int n_threads) {
2976 2977 // The "size" of each task is fixed according to rescan_task_size.
2977 2978 assert(n_threads > 0, "Unexpected n_threads argument");
2978 2979 const size_t task_size = rescan_task_size();
2979 2980 size_t n_tasks = (used_region().word_size() + task_size - 1)/task_size;
2980 2981 assert((n_tasks == 0) == used_region().is_empty(), "n_tasks incorrect");
2981 2982 assert(n_tasks == 0 ||
2982 2983 ((used_region().start() + (n_tasks - 1)*task_size < used_region().end()) &&
2983 2984 (used_region().start() + n_tasks*task_size >= used_region().end())),
2984 2985 "n_tasks calculation incorrect");
2985 2986 SequentialSubTasksDone* pst = conc_par_seq_tasks();
2986 2987 assert(!pst->valid(), "Clobbering existing data?");
2987 2988 // Sets the condition for completion of the subtask (how many threads
2988 2989 // need to finish in order to be done).
2989 2990 pst->set_n_threads(n_threads);
2990 2991 pst->set_n_tasks((int)n_tasks);
2991 2992 }
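// Worked example (numbers hypothetical): with task_size == 4096 words and a used
// region of 10000 words, n_tasks == (10000 + 4095)/4096 == 3; the asserts above
// require that (n_tasks - 1)*task_size still lies inside the region (8192 < 10000)
// while n_tasks*task_size covers it (12288 >= 10000).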
2992 2993
2993 2994 // Set up the space's par_seq_tasks structure for work claiming
2994 2995 // for parallel concurrent marking. See CMSConcMarkTask where this is currently used.
2995 2996 void
2996 2997 CompactibleFreeListSpace::
2997 2998 initialize_sequential_subtasks_for_marking(int n_threads,
2998 2999 HeapWord* low) {
2999 3000 // The "size" of each task is fixed according to rescan_task_size.
3000 3001 assert(n_threads > 0, "Unexpected n_threads argument");
3001 3002 const size_t task_size = marking_task_size();
3002 3003 assert(task_size > CardTableModRefBS::card_size_in_words &&
3003 3004 (task_size % CardTableModRefBS::card_size_in_words == 0),
3004 3005 "Otherwise arithmetic below would be incorrect");
3005 3006 MemRegion span = _gen->reserved();
3006 3007 if (low != NULL) {
3007 3008 if (span.contains(low)) {
3008 3009 // Align low down to a card boundary so that
3009 3010 // we can use block_offset_careful() on span boundaries.
3010 3011 HeapWord* aligned_low = (HeapWord*)align_size_down((uintptr_t)low,
3011 3012 CardTableModRefBS::card_size);
3012 3013 // Clip span prefix at aligned_low
3013 3014 span = span.intersection(MemRegion(aligned_low, span.end()));
3014 3015 } else if (low > span.end()) {
3015 3016 span = MemRegion(low, low); // Null region
3016 3017 } // else use entire span
3017 3018 }
3018 3019 assert(span.is_empty() ||
3019 3020 ((uintptr_t)span.start() % CardTableModRefBS::card_size == 0),
3020 3021 "span should start at a card boundary");
3021 3022 size_t n_tasks = (span.word_size() + task_size - 1)/task_size;
3022 3023 assert((n_tasks == 0) == span.is_empty(), "Inconsistency");
3023 3024 assert(n_tasks == 0 ||
3024 3025 ((span.start() + (n_tasks - 1)*task_size < span.end()) &&
3025 3026 (span.start() + n_tasks*task_size >= span.end())),
3026 3027 "n_tasks calculation incorrect");
3027 3028 SequentialSubTasksDone* pst = conc_par_seq_tasks();
3028 3029 assert(!pst->valid(), "Clobbering existing data?");
3029 3030 // Sets the condition for completion of the subtask (how many threads
3030 3031 // need to finish in order to be done).
3031 3032 pst->set_n_threads(n_threads);
3032 3033 pst->set_n_tasks((int)n_tasks);
3033 3034 }
2736 lines elided