344 public:
  // Closure callback: have the space update all of its pointers to
  // refer to objects' new (post-compaction) locations.
  void do_space(Space* sp) {
    sp->adjust_pointers();
  }
348 };
349
// Pointer-adjustment phase of mark-compact: visit every space in this
// generation and fix up references to objects that will be relocated.
void Generation::adjust_pointers() {
  // Note that this is done over all spaces, not just the compactible
  // ones.
  AdjustPointersClosure blk;
  space_iterate(&blk, true);
}
356
357 void Generation::compact() {
358 CompactibleSpace* sp = first_compaction_space();
359 while (sp != NULL) {
360 sp->compact();
361 sp = sp->next_compaction_space();
362 }
363 }
364
// Construct a card-table-based generation over the reserved space "rs",
// committing "initial_byte_size" bytes up front.  Sets up the block
// offset table (_bts) and tells the remembered set which region it must
// cover, then checks card alignment of the generation's boundaries.
CardGeneration::CardGeneration(ReservedSpace rs, size_t initial_byte_size,
                               int level,
                               GenRemSet* remset) :
  Generation(rs, initial_byte_size, level), _rs(remset),
  _shrink_factor(0), _min_heap_delta_bytes(), _capacity_at_prologue(),
  _used_at_prologue()
{
  HeapWord* start = (HeapWord*)rs.base();
  size_t reserved_byte_size = rs.size();
  // Word alignment of the reserved region is a prerequisite for the
  // HeapWord-based MemRegions built below.
  assert((uintptr_t(start) & 3) == 0, "bad alignment");
  assert((reserved_byte_size & 3) == 0, "bad alignment");
  MemRegion reserved_mr(start, heap_word_size(reserved_byte_size));
  _bts = new BlockOffsetSharedArray(reserved_mr,
                                    heap_word_size(initial_byte_size));
  // The remembered set must cover (at least) the initially committed part.
  MemRegion committed_mr(start, heap_word_size(initial_byte_size));
  _rs->resize_covered_region(committed_mr);
  // NOTE(review): operator new here presumably returns NULL on failure
  // rather than throwing (VM-internal allocator) -- otherwise this check
  // is dead code; confirm against the allocator used by
  // BlockOffsetSharedArray.
  if (_bts == NULL)
    vm_exit_during_initialization("Could not allocate a BlockOffsetArray");

  // Verify that the start and end of this generation is the start of a card.
  // If this wasn't true, a single card could span more than one generation,
  // which would cause problems when we commit/uncommit memory, and when we
  // clear and dirty cards.
  guarantee(_rs->is_aligned(reserved_mr.start()), "generation must be card aligned");
  if (reserved_mr.end() != Universe::heap()->reserved_region().end()) {
    // Don't check at the very end of the heap as we'll assert that we're probing off
    // the end if we try.
    guarantee(_rs->is_aligned(reserved_mr.end()), "generation must be card aligned");
  }
  // Remember sizing inputs used later by compute_new_size()/expand().
  _min_heap_delta_bytes = MinHeapDeltaBytes;
  _capacity_at_prologue = initial_byte_size;
  _used_at_prologue = 0;
}
398
399 bool CardGeneration::expand(size_t bytes, size_t expand_bytes) {
400 assert_locked_or_safepoint(Heap_lock);
401 if (bytes == 0) {
402 return true; // That's what grow_by(0) would return
403 }
404 size_t aligned_bytes = ReservedSpace::page_align_size_up(bytes);
405 if (aligned_bytes == 0){
406 // The alignment caused the number of bytes to wrap. An expand_by(0) will
407 // return true with the implication that an expansion was done when it
408 // was not. A call to expand implies a best effort to expand by "bytes"
409 // but not a guarantee. Align down to give a best effort. This is likely
410 // the most that the generation can expand since it has some capacity to
411 // start with.
412 aligned_bytes = ReservedSpace::page_align_size_down(bytes);
413 }
414 size_t aligned_expand_bytes = ReservedSpace::page_align_size_up(expand_bytes);
415 bool success = false;
416 if (aligned_expand_bytes > aligned_bytes) {
417 success = grow_by(aligned_expand_bytes);
418 }
419 if (!success) {
420 success = grow_by(aligned_bytes);
421 }
422 if (!success) {
423 success = grow_to_reserved();
424 }
425 if (PrintGC && Verbose) {
426 if (success && GC_locker::is_active_and_needs_gc()) {
427 gclog_or_tty->print_cr("Garbage collection disabled, expanded heap instead");
428 }
429 }
430
431 return success;
432 }
433
434
// No young generation references, clear this generation's cards.
void CardGeneration::clear_remembered_set() {
  // Clear cards over the whole reserved region of this generation.
  _rs->clear(reserved());
}
439
440
// Objects in this generation may have moved, invalidate this
// generation's cards.
void CardGeneration::invalidate_remembered_set() {
  // Only the used portion can contain moved objects.
  _rs->invalidate(used_region());
}
446
447
448 void CardGeneration::compute_new_size() {
449 assert(_shrink_factor <= 100, "invalid shrink factor");
450 size_t current_shrink_factor = _shrink_factor;
451 _shrink_factor = 0;
452
453 // We don't have floating point command-line arguments
454 // Note: argument processing ensures that MinHeapFreeRatio < 100.
455 const double minimum_free_percentage = MinHeapFreeRatio / 100.0;
456 const double maximum_used_percentage = 1.0 - minimum_free_percentage;
457
458 // Compute some numbers about the state of the heap.
459 const size_t used_after_gc = used();
460 const size_t capacity_after_gc = capacity();
461
462 const double min_tmp = used_after_gc / maximum_used_percentage;
463 size_t minimum_desired_capacity = (size_t)MIN2(min_tmp, double(max_uintx));
464 // Don't shrink less than the initial generation size
465 minimum_desired_capacity = MAX2(minimum_desired_capacity,
466 spec()->init_size());
467 assert(used_after_gc <= minimum_desired_capacity, "sanity check");
468
469 if (PrintGC && Verbose) {
470 const size_t free_after_gc = free();
471 const double free_percentage = ((double)free_after_gc) / capacity_after_gc;
472 gclog_or_tty->print_cr("TenuredGeneration::compute_new_size: ");
473 gclog_or_tty->print_cr(" "
474 " minimum_free_percentage: %6.2f"
475 " maximum_used_percentage: %6.2f",
476 minimum_free_percentage,
477 maximum_used_percentage);
478 gclog_or_tty->print_cr(" "
479 " free_after_gc : %6.1fK"
480 " used_after_gc : %6.1fK"
481 " capacity_after_gc : %6.1fK",
482 free_after_gc / (double) K,
483 used_after_gc / (double) K,
484 capacity_after_gc / (double) K);
485 gclog_or_tty->print_cr(" "
486 " free_percentage: %6.2f",
487 free_percentage);
488 }
489
490 if (capacity_after_gc < minimum_desired_capacity) {
491 // If we have less free space than we want then expand
492 size_t expand_bytes = minimum_desired_capacity - capacity_after_gc;
493 // Don't expand unless it's significant
494 if (expand_bytes >= _min_heap_delta_bytes) {
495 expand(expand_bytes, 0); // safe if expansion fails
496 }
497 if (PrintGC && Verbose) {
498 gclog_or_tty->print_cr(" expanding:"
499 " minimum_desired_capacity: %6.1fK"
500 " expand_bytes: %6.1fK"
501 " _min_heap_delta_bytes: %6.1fK",
502 minimum_desired_capacity / (double) K,
503 expand_bytes / (double) K,
504 _min_heap_delta_bytes / (double) K);
505 }
506 return;
507 }
508
509 // No expansion, now see if we want to shrink
510 size_t shrink_bytes = 0;
511 // We would never want to shrink more than this
512 size_t max_shrink_bytes = capacity_after_gc - minimum_desired_capacity;
513
514 if (MaxHeapFreeRatio < 100) {
515 const double maximum_free_percentage = MaxHeapFreeRatio / 100.0;
516 const double minimum_used_percentage = 1.0 - maximum_free_percentage;
517 const double max_tmp = used_after_gc / minimum_used_percentage;
518 size_t maximum_desired_capacity = (size_t)MIN2(max_tmp, double(max_uintx));
519 maximum_desired_capacity = MAX2(maximum_desired_capacity,
520 spec()->init_size());
521 if (PrintGC && Verbose) {
522 gclog_or_tty->print_cr(" "
523 " maximum_free_percentage: %6.2f"
524 " minimum_used_percentage: %6.2f",
525 maximum_free_percentage,
526 minimum_used_percentage);
527 gclog_or_tty->print_cr(" "
528 " _capacity_at_prologue: %6.1fK"
529 " minimum_desired_capacity: %6.1fK"
530 " maximum_desired_capacity: %6.1fK",
531 _capacity_at_prologue / (double) K,
532 minimum_desired_capacity / (double) K,
533 maximum_desired_capacity / (double) K);
534 }
535 assert(minimum_desired_capacity <= maximum_desired_capacity,
536 "sanity check");
537
538 if (capacity_after_gc > maximum_desired_capacity) {
539 // Capacity too large, compute shrinking size
540 shrink_bytes = capacity_after_gc - maximum_desired_capacity;
541 // We don't want shrink all the way back to initSize if people call
542 // System.gc(), because some programs do that between "phases" and then
543 // we'd just have to grow the heap up again for the next phase. So we
544 // damp the shrinking: 0% on the first call, 10% on the second call, 40%
545 // on the third call, and 100% by the fourth call. But if we recompute
546 // size without shrinking, it goes back to 0%.
547 shrink_bytes = shrink_bytes / 100 * current_shrink_factor;
548 assert(shrink_bytes <= max_shrink_bytes, "invalid shrink size");
549 if (current_shrink_factor == 0) {
550 _shrink_factor = 10;
551 } else {
552 _shrink_factor = MIN2(current_shrink_factor * 4, (size_t) 100);
553 }
554 if (PrintGC && Verbose) {
555 gclog_or_tty->print_cr(" "
556 " shrinking:"
557 " initSize: %.1fK"
558 " maximum_desired_capacity: %.1fK",
559 spec()->init_size() / (double) K,
560 maximum_desired_capacity / (double) K);
561 gclog_or_tty->print_cr(" "
562 " shrink_bytes: %.1fK"
563 " current_shrink_factor: " SIZE_FORMAT
564 " new shrink factor: " SIZE_FORMAT
565 " _min_heap_delta_bytes: %.1fK",
566 shrink_bytes / (double) K,
567 current_shrink_factor,
568 _shrink_factor,
569 _min_heap_delta_bytes / (double) K);
570 }
571 }
572 }
573
574 if (capacity_after_gc > _capacity_at_prologue) {
575 // We might have expanded for promotions, in which case we might want to
576 // take back that expansion if there's room after GC. That keeps us from
577 // stretching the heap with promotions when there's plenty of room.
578 size_t expansion_for_promotion = capacity_after_gc - _capacity_at_prologue;
579 expansion_for_promotion = MIN2(expansion_for_promotion, max_shrink_bytes);
580 // We have two shrinking computations, take the largest
581 shrink_bytes = MAX2(shrink_bytes, expansion_for_promotion);
582 assert(shrink_bytes <= max_shrink_bytes, "invalid shrink size");
583 if (PrintGC && Verbose) {
584 gclog_or_tty->print_cr(" "
585 " aggressive shrinking:"
586 " _capacity_at_prologue: %.1fK"
587 " capacity_after_gc: %.1fK"
588 " expansion_for_promotion: %.1fK"
589 " shrink_bytes: %.1fK",
590 capacity_after_gc / (double) K,
591 _capacity_at_prologue / (double) K,
592 expansion_for_promotion / (double) K,
593 shrink_bytes / (double) K);
594 }
595 }
596 // Don't shrink unless it's significant
597 if (shrink_bytes >= _min_heap_delta_bytes) {
598 shrink(shrink_bytes);
599 }
600 }
601
// Pre-verification hook for card generations.
// Currently nothing to do.
void CardGeneration::prepare_for_verify() {}
604
|
344 public:
  // Closure callback: have the space update all of its pointers to
  // refer to objects' new (post-compaction) locations.
  void do_space(Space* sp) {
    sp->adjust_pointers();
  }
348 };
349
// Pointer-adjustment phase of mark-compact: visit every space in this
// generation and fix up references to objects that will be relocated.
void Generation::adjust_pointers() {
  // Note that this is done over all spaces, not just the compactible
  // ones.
  AdjustPointersClosure blk;
  space_iterate(&blk, true);
}
356
357 void Generation::compact() {
358 CompactibleSpace* sp = first_compaction_space();
359 while (sp != NULL) {
360 sp->compact();
361 sp = sp->next_compaction_space();
362 }
363 }
|