410 // should one, pessimistically for the rare cases when res
411 // calculated above is less than IndexSetSize,
412 // just return res calculated above? My reasoning was that
413 // those cases will be so rare that the extra time spent doesn't
414 // really matter....
415 // Note: do not change the loop test i >= res + IndexSetStride
416 // to i > res below, because i is unsigned and res may be zero.
417 for (size_t i = IndexSetSize - 1; i >= res + IndexSetStride;
418 i -= IndexSetStride) {
419 if (_indexedFreeList[i].head() != NULL) {
420 assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
421 return i;
422 }
423 }
424 return res;
425 }
426
427 void LinearAllocBlock::print_on(outputStream* st) const {
428 st->print_cr(" LinearAllocBlock: ptr = " PTR_FORMAT ", word_size = " SIZE_FORMAT
429 ", refillsize = " SIZE_FORMAT ", allocation_size_limit = " SIZE_FORMAT,
430 _ptr, _word_size, _refillSize, _allocation_size_limit);
431 }
432
// Dump a human-readable summary of this space on "st": the underlying
// Space bounds, the promotion-tracking info, the small linear allocation
// block, and the fit-strategy / adaptive-freelist flags.
void CompactibleFreeListSpace::print_on(outputStream* st) const {
  st->print_cr("COMPACTIBLE FREELIST SPACE");
  st->print_cr(" Space:");
  Space::print_on(st);  // superclass prints the [bottom, end) bounds

  st->print_cr("promoInfo:");
  _promoInfo.print_on(st);

  st->print_cr("_smallLinearAllocBlock");
  _smallLinearAllocBlock.print_on(st);

  // dump_memory_block(_smallLinearAllocBlock->_ptr, 128);

  st->print_cr(" _fitStrategy = %s, _adaptive_freelists = %s",
               _fitStrategy?"true":"false", _adaptive_freelists?"true":"false");
}
449
450 void CompactibleFreeListSpace::print_indexed_free_lists(outputStream* st)
451 const {
452 reportIndexedFreeListStatistics();
453 gclog_or_tty->print_cr("Layout of Indexed Freelists");
454 gclog_or_tty->print_cr("---------------------------");
455 AdaptiveFreeList<FreeChunk>::print_labels_on(st, "size");
456 for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
457 _indexedFreeList[i].print_on(gclog_or_tty);
458 for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
459 fc = fc->next()) {
460 gclog_or_tty->print_cr("\t[" PTR_FORMAT "," PTR_FORMAT ") %s",
461 fc, (HeapWord*)fc + i,
462 fc->cantCoalesce() ? "\t CC" : "");
463 }
464 }
465 }
466
// Print the promotion info blocks on "st"; delegates to _promoInfo.
void CompactibleFreeListSpace::print_promo_info_blocks(outputStream* st)
const {
  _promoInfo.print_on(st);
}
471
// Report statistics for, then print the layout of, the free lists kept
// in the (tree-structured) dictionary of large chunks.
void CompactibleFreeListSpace::print_dictionary_free_lists(outputStream* st)
const {
  _dictionary->report_statistics();
  st->print_cr("Layout of Freelists in Tree");
  st->print_cr("---------------------------");
  _dictionary->print_free_lists(st);
}
479
480 class BlkPrintingClosure: public BlkClosure {
481 const CMSCollector* _collector;
485 outputStream* _st;
486 public:
487 BlkPrintingClosure(const CMSCollector* collector,
488 const CompactibleFreeListSpace* sp,
489 const CMSBitMap* live_bit_map,
490 outputStream* st):
491 _collector(collector),
492 _sp(sp),
493 _live_bit_map(live_bit_map),
494 _post_remark(collector->abstract_state() > CMSCollector::FinalMarking),
495 _st(st) { }
496 size_t do_blk(HeapWord* addr);
497 };
498
499 size_t BlkPrintingClosure::do_blk(HeapWord* addr) {
500 size_t sz = _sp->block_size_no_stall(addr, _collector);
501 assert(sz != 0, "Should always be able to compute a size");
502 if (_sp->block_is_obj(addr)) {
503 const bool dead = _post_remark && !_live_bit_map->isMarked(addr);
504 _st->print_cr(PTR_FORMAT ": %s object of size " SIZE_FORMAT "%s",
505 addr,
506 dead ? "dead" : "live",
507 sz,
508 (!dead && CMSPrintObjectsInDump) ? ":" : ".");
509 if (CMSPrintObjectsInDump && !dead) {
510 oop(addr)->print_on(_st);
511 _st->print_cr("--------------------------------------");
512 }
513 } else { // free block
514 _st->print_cr(PTR_FORMAT ": free block of size " SIZE_FORMAT "%s",
515 addr, sz, CMSPrintChunksInDump ? ":" : ".");
516 if (CMSPrintChunksInDump) {
517 ((FreeChunk*)addr)->print_on(_st);
518 _st->print_cr("--------------------------------------");
519 }
520 }
521 return sz;
522 }
523
524 void CompactibleFreeListSpace::dump_at_safepoint_with_locks(CMSCollector* c,
525 outputStream* st) {
526 st->print_cr("\n=========================");
527 st->print_cr("Block layout in CMS Heap:");
528 st->print_cr("=========================");
529 BlkPrintingClosure bpcl(c, this, c->markBitMap(), st);
530 blk_iterate(&bpcl);
531
532 st->print_cr("\n=======================================");
533 st->print_cr("Order & Layout of Promotion Info Blocks");
534 st->print_cr("=======================================");
535 print_promo_info_blocks(st);
1965 reportFreeListStatistics();
1966 }
1967 }
1968
1969 // Iteration support, mostly delegated from a CMS generation
1970
1971 void CompactibleFreeListSpace::save_marks() {
1972 assert(Thread::current()->is_VM_thread(),
1973 "Global variable should only be set when single-threaded");
1974 // Mark the "end" of the used space at the time of this call;
1975 // note, however, that promoted objects from this point
1976 // on are tracked in the _promoInfo below.
1977 set_saved_mark_word(unallocated_block());
1978 #ifdef ASSERT
1979 // Check the sanity of save_marks() etc.
1980 MemRegion ur = used_region();
1981 MemRegion urasm = used_region_at_save_marks();
1982 assert(ur.contains(urasm),
1983 err_msg(" Error at save_marks(): [" PTR_FORMAT "," PTR_FORMAT ")"
1984 " should contain [" PTR_FORMAT "," PTR_FORMAT ")",
1985 ur.start(), ur.end(), urasm.start(), urasm.end()));
1986 #endif
1987 // inform allocator that promotions should be tracked.
1988 assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
1989 _promoInfo.startTrackingPromotions();
1990 }
1991
// Return true iff no promotions into this space have been recorded since
// the preceding save_marks() call. Must not be used during parallel GC.
bool CompactibleFreeListSpace::no_allocs_since_save_marks() {
  assert(_promoInfo.tracking(), "No preceding save_marks?");
  assert(SharedHeap::heap()->n_par_threads() == 0,
         "Shouldn't be called if using parallel gc.");
  return _promoInfo.noPromotions();
}
1998
1999 #define CFLS_OOP_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix) \
2000 \
2001 void CompactibleFreeListSpace:: \
2002 oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) { \
2003 assert(SharedHeap::heap()->n_par_threads() == 0, \
2004 "Shouldn't be called (yet) during parallel part of gc."); \
2005 _promoInfo.promoted_oops_iterate##nv_suffix(blk); \
2188 }
2189 }
2190
2191 void CompactibleFreeListSpace::clearFLCensus() {
2192 assert_locked();
2193 size_t i;
2194 for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2195 AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
2196 fl->set_prev_sweep(fl->count());
2197 fl->set_coal_births(0);
2198 fl->set_coal_deaths(0);
2199 fl->set_split_births(0);
2200 fl->set_split_deaths(0);
2201 }
2202 }
2203
2204 void CompactibleFreeListSpace::endSweepFLCensus(size_t sweep_count) {
2205 if (PrintFLSStatistics > 0) {
2206 HeapWord* largestAddr = (HeapWord*) dictionary()->find_largest_dict();
2207 gclog_or_tty->print_cr("CMS: Large block " PTR_FORMAT,
2208 largestAddr);
2209 }
2210 setFLSurplus();
2211 setFLHints();
2212 if (PrintGC && PrintFLSCensus > 0) {
2213 printFLCensus(sweep_count);
2214 }
2215 clearFLCensus();
2216 assert_locked();
2217 _dictionary->end_sweep_dict_census(CMSLargeSplitSurplusPercent);
2218 }
2219
2220 bool CompactibleFreeListSpace::coalOverPopulated(size_t size) {
2221 if (size < SmallForDictionary) {
2222 AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2223 return (fl->coal_desired() < 0) ||
2224 ((int)fl->count() > fl->coal_desired());
2225 } else {
2226 return dictionary()->coal_dict_over_populated(size);
2227 }
2228 }
2337 oop p = oop(addr);
2338 guarantee(p->is_oop(), "Should be an oop");
2339 res = _sp->adjustObjectSize(p->size());
2340 if (_sp->obj_is_alive(addr)) {
2341 was_live = true;
2342 p->verify();
2343 }
2344 } else {
2345 FreeChunk* fc = (FreeChunk*)addr;
2346 res = fc->size();
2347 if (FLSVerifyLists && !fc->cantCoalesce()) {
2348 guarantee(_sp->verify_chunk_in_free_list(fc),
2349 "Chunk should be on a free list");
2350 }
2351 }
2352 if (res == 0) {
2353 gclog_or_tty->print_cr("Livelock: no rank reduction!");
2354 gclog_or_tty->print_cr(
2355 " Current: addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n"
2356 " Previous: addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n",
2357 addr, res, was_obj ?"true":"false", was_live ?"true":"false",
2358 _last_addr, _last_size, _last_was_obj?"true":"false", _last_was_live?"true":"false");
2359 _sp->print_on(gclog_or_tty);
2360 guarantee(false, "Seppuku!");
2361 }
2362 _last_addr = addr;
2363 _last_size = res;
2364 _last_was_obj = was_obj;
2365 _last_was_live = was_live;
2366 return res;
2367 }
2368 };
2369
2370 class VerifyAllOopsClosure: public OopClosure {
2371 private:
2372 const CMSCollector* _collector;
2373 const CompactibleFreeListSpace* _sp;
2374 const MemRegion _span;
2375 const bool _past_remark;
2376 const CMSBitMap* _bit_map;
2377
2378 protected:
|
410 // should one, pessimistically for the rare cases when res
411 // calculated above is less than IndexSetSize,
412 // just return res calculated above? My reasoning was that
413 // those cases will be so rare that the extra time spent doesn't
414 // really matter....
415 // Note: do not change the loop test i >= res + IndexSetStride
416 // to i > res below, because i is unsigned and res may be zero.
417 for (size_t i = IndexSetSize - 1; i >= res + IndexSetStride;
418 i -= IndexSetStride) {
419 if (_indexedFreeList[i].head() != NULL) {
420 assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
421 return i;
422 }
423 }
424 return res;
425 }
426
// Print this linear allocation block's state (current pointer, remaining
// word size, refill size and allocation size limit) on "st".
void LinearAllocBlock::print_on(outputStream* st) const {
  st->print_cr(" LinearAllocBlock: ptr = " PTR_FORMAT ", word_size = " SIZE_FORMAT
            ", refillsize = " SIZE_FORMAT ", allocation_size_limit = " SIZE_FORMAT,
            p2i(_ptr), _word_size, _refillSize, _allocation_size_limit);
}
432
// Dump a human-readable summary of this space on "st": the underlying
// Space bounds, the promotion-tracking info, the small linear allocation
// block, and the fit-strategy / adaptive-freelist flags.
void CompactibleFreeListSpace::print_on(outputStream* st) const {
  st->print_cr("COMPACTIBLE FREELIST SPACE");
  st->print_cr(" Space:");
  Space::print_on(st);  // superclass prints the [bottom, end) bounds

  st->print_cr("promoInfo:");
  _promoInfo.print_on(st);

  st->print_cr("_smallLinearAllocBlock");
  _smallLinearAllocBlock.print_on(st);

  // dump_memory_block(_smallLinearAllocBlock->_ptr, 128);

  st->print_cr(" _fitStrategy = %s, _adaptive_freelists = %s",
               _fitStrategy?"true":"false", _adaptive_freelists?"true":"false");
}
449
// Print each indexed (small-size) free list, followed by the address
// range [fc, fc + i) of every chunk on it; a size-i list holds chunks
// of exactly i heap words.
// NOTE(review): output is split between "st" (labels) and gclog_or_tty
// (statistics and chunk lines) — presumably intentional, but verify.
void CompactibleFreeListSpace::print_indexed_free_lists(outputStream* st)
const {
  reportIndexedFreeListStatistics();
  gclog_or_tty->print_cr("Layout of Indexed Freelists");
  gclog_or_tty->print_cr("---------------------------");
  AdaptiveFreeList<FreeChunk>::print_labels_on(st, "size");
  for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
    _indexedFreeList[i].print_on(gclog_or_tty);
    for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
         fc = fc->next()) {
      gclog_or_tty->print_cr("\t[" PTR_FORMAT "," PTR_FORMAT ") %s",
                             p2i(fc), p2i((HeapWord*)fc + i),
                             fc->cantCoalesce() ? "\t CC" : "");
    }
  }
}
466
// Print the promotion info blocks on "st"; delegates to _promoInfo.
void CompactibleFreeListSpace::print_promo_info_blocks(outputStream* st)
const {
  _promoInfo.print_on(st);
}
471
// Report statistics for, then print the layout of, the free lists kept
// in the (tree-structured) dictionary of large chunks.
void CompactibleFreeListSpace::print_dictionary_free_lists(outputStream* st)
const {
  _dictionary->report_statistics();
  st->print_cr("Layout of Freelists in Tree");
  st->print_cr("---------------------------");
  _dictionary->print_free_lists(st);
}
479
480 class BlkPrintingClosure: public BlkClosure {
481 const CMSCollector* _collector;
485 outputStream* _st;
486 public:
487 BlkPrintingClosure(const CMSCollector* collector,
488 const CompactibleFreeListSpace* sp,
489 const CMSBitMap* live_bit_map,
490 outputStream* st):
491 _collector(collector),
492 _sp(sp),
493 _live_bit_map(live_bit_map),
494 _post_remark(collector->abstract_state() > CMSCollector::FinalMarking),
495 _st(st) { }
496 size_t do_blk(HeapWord* addr);
497 };
498
// Print a one-line description of the block at "addr" — live/dead object
// or free chunk, plus its size — and, under CMSPrintObjectsInDump /
// CMSPrintChunksInDump, dump the contents too.
// Returns the block size in heap words so iteration can advance.
size_t BlkPrintingClosure::do_blk(HeapWord* addr) {
  size_t sz = _sp->block_size_no_stall(addr, _collector);
  assert(sz != 0, "Should always be able to compute a size");
  if (_sp->block_is_obj(addr)) {
    // "Dead" only if we are past remark and the object is unmarked.
    const bool dead = _post_remark && !_live_bit_map->isMarked(addr);
    _st->print_cr(PTR_FORMAT ": %s object of size " SIZE_FORMAT "%s",
      p2i(addr),
      dead ? "dead" : "live",
      sz,
      (!dead && CMSPrintObjectsInDump) ? ":" : ".");
    if (CMSPrintObjectsInDump && !dead) {
      oop(addr)->print_on(_st);
      _st->print_cr("--------------------------------------");
    }
  } else { // free block
    _st->print_cr(PTR_FORMAT ": free block of size " SIZE_FORMAT "%s",
      p2i(addr), sz, CMSPrintChunksInDump ? ":" : ".");
    if (CMSPrintChunksInDump) {
      ((FreeChunk*)addr)->print_on(_st);
      _st->print_cr("--------------------------------------");
    }
  }
  return sz;
}
523
524 void CompactibleFreeListSpace::dump_at_safepoint_with_locks(CMSCollector* c,
525 outputStream* st) {
526 st->print_cr("\n=========================");
527 st->print_cr("Block layout in CMS Heap:");
528 st->print_cr("=========================");
529 BlkPrintingClosure bpcl(c, this, c->markBitMap(), st);
530 blk_iterate(&bpcl);
531
532 st->print_cr("\n=======================================");
533 st->print_cr("Order & Layout of Promotion Info Blocks");
534 st->print_cr("=======================================");
535 print_promo_info_blocks(st);
1965 reportFreeListStatistics();
1966 }
1967 }
1968
1969 // Iteration support, mostly delegated from a CMS generation
1970
// Record the current "end" of the used space so that later iteration can
// distinguish pre-existing objects from subsequent promotions, and tell
// the promotion info to start tracking promotions. VM-thread only.
void CompactibleFreeListSpace::save_marks() {
  assert(Thread::current()->is_VM_thread(),
         "Global variable should only be set when single-threaded");
  // Mark the "end" of the used space at the time of this call;
  // note, however, that promoted objects from this point
  // on are tracked in the _promoInfo below.
  set_saved_mark_word(unallocated_block());
#ifdef ASSERT
  // Check the sanity of save_marks() etc.
  MemRegion ur = used_region();
  MemRegion urasm = used_region_at_save_marks();
  assert(ur.contains(urasm),
         err_msg(" Error at save_marks(): [" PTR_FORMAT "," PTR_FORMAT ")"
                 " should contain [" PTR_FORMAT "," PTR_FORMAT ")",
                 p2i(ur.start()), p2i(ur.end()), p2i(urasm.start()), p2i(urasm.end())));
#endif
  // inform allocator that promotions should be tracked.
  assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
  _promoInfo.startTrackingPromotions();
}
1991
// Return true iff no promotions into this space have been recorded since
// the preceding save_marks() call. Must not be used during parallel GC.
bool CompactibleFreeListSpace::no_allocs_since_save_marks() {
  assert(_promoInfo.tracking(), "No preceding save_marks?");
  assert(SharedHeap::heap()->n_par_threads() == 0,
         "Shouldn't be called if using parallel gc.");
  return _promoInfo.noPromotions();
}
1998
1999 #define CFLS_OOP_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix) \
2000 \
2001 void CompactibleFreeListSpace:: \
2002 oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) { \
2003 assert(SharedHeap::heap()->n_par_threads() == 0, \
2004 "Shouldn't be called (yet) during parallel part of gc."); \
2005 _promoInfo.promoted_oops_iterate##nv_suffix(blk); \
2188 }
2189 }
2190
2191 void CompactibleFreeListSpace::clearFLCensus() {
2192 assert_locked();
2193 size_t i;
2194 for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2195 AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
2196 fl->set_prev_sweep(fl->count());
2197 fl->set_coal_births(0);
2198 fl->set_coal_deaths(0);
2199 fl->set_split_births(0);
2200 fl->set_split_deaths(0);
2201 }
2202 }
2203
// Wrap up the free-list census at the end of a sweep: optionally report
// the largest dictionary block, recompute surplus values and hints,
// optionally print the census, then reset the counters and inform the
// dictionary that its sweep census is complete.
void CompactibleFreeListSpace::endSweepFLCensus(size_t sweep_count) {
  if (PrintFLSStatistics > 0) {
    HeapWord* largestAddr = (HeapWord*) dictionary()->find_largest_dict();
    gclog_or_tty->print_cr("CMS: Large block " PTR_FORMAT,
                           p2i(largestAddr));
  }
  setFLSurplus();
  setFLHints();
  if (PrintGC && PrintFLSCensus > 0) {
    printFLCensus(sweep_count);
  }
  clearFLCensus();
  assert_locked();
  _dictionary->end_sweep_dict_census(CMSLargeSplitSurplusPercent);
}
2219
2220 bool CompactibleFreeListSpace::coalOverPopulated(size_t size) {
2221 if (size < SmallForDictionary) {
2222 AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2223 return (fl->coal_desired() < 0) ||
2224 ((int)fl->count() > fl->coal_desired());
2225 } else {
2226 return dictionary()->coal_dict_over_populated(size);
2227 }
2228 }
2337 oop p = oop(addr);
2338 guarantee(p->is_oop(), "Should be an oop");
2339 res = _sp->adjustObjectSize(p->size());
2340 if (_sp->obj_is_alive(addr)) {
2341 was_live = true;
2342 p->verify();
2343 }
2344 } else {
2345 FreeChunk* fc = (FreeChunk*)addr;
2346 res = fc->size();
2347 if (FLSVerifyLists && !fc->cantCoalesce()) {
2348 guarantee(_sp->verify_chunk_in_free_list(fc),
2349 "Chunk should be on a free list");
2350 }
2351 }
2352 if (res == 0) {
2353 gclog_or_tty->print_cr("Livelock: no rank reduction!");
2354 gclog_or_tty->print_cr(
2355 " Current: addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n"
2356 " Previous: addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n",
2357 p2i(addr), res, was_obj ?"true":"false", was_live ?"true":"false",
2358 p2i(_last_addr), _last_size, _last_was_obj?"true":"false", _last_was_live?"true":"false");
2359 _sp->print_on(gclog_or_tty);
2360 guarantee(false, "Seppuku!");
2361 }
2362 _last_addr = addr;
2363 _last_size = res;
2364 _last_was_obj = was_obj;
2365 _last_was_live = was_live;
2366 return res;
2367 }
2368 };
2369
2370 class VerifyAllOopsClosure: public OopClosure {
2371 private:
2372 const CMSCollector* _collector;
2373 const CompactibleFreeListSpace* _sp;
2374 const MemRegion _span;
2375 const bool _past_remark;
2376 const CMSBitMap* _bit_map;
2377
2378 protected:
|