45 assert(_init_gen_size != 0, "Should have a finite size");
46 _virtual_space = new PSVirtualSpace(rs, alignment);
47 if (!virtual_space()->expand_by(_init_gen_size)) {
48 vm_exit_during_initialization("Could not reserve enough space for "
49 "object heap");
50 }
51 }
52
// Set up the generation: reserve/commit the backing virtual space,
// then perform the one-time space and counter initialization.
53 void PSYoungGen::initialize(ReservedSpace rs, size_t alignment) {
54 initialize_virtual_space(rs, alignment);
55 initialize_work();
56 }
57
// One-time initialization of the young generation over the committed
// virtual space: records the reserved region, registers the committed
// region with the barrier set, allocates the eden/from/to spaces and
// their mark-sweep decorators, creates the performance counters, and
// finally computes the initial eden/survivor boundaries.
58 void PSYoungGen::initialize_work() {
59
60 _reserved = MemRegion((HeapWord*)virtual_space()->low_boundary(),
61 (HeapWord*)virtual_space()->high_boundary());
62
63 MemRegion cmr((HeapWord*)virtual_space()->low(),
64 (HeapWord*)virtual_space()->high());
65 Universe::heap()->barrier_set()->resize_covered_region(cmr);
66
67 if (ZapUnusedHeapArea) {
68 // Mangle newly committed space immediately because it
69 // can be done here more simply than after the new
70 // spaces have been computed.
71 SpaceMangler::mangle_region(cmr);
72 }
73
// Eden is NUMA-aware when requested; the survivor spaces are plain
// mutable spaces in either case.
74 if (UseNUMA) {
75 _eden_space = new MutableNUMASpace(virtual_space()->alignment());
76 } else {
77 _eden_space = new MutableSpace(virtual_space()->alignment());
78 }
79 _from_space = new MutableSpace(virtual_space()->alignment());
80 _to_space = new MutableSpace(virtual_space()->alignment());
81
82 if (_eden_space == NULL || _from_space == NULL || _to_space == NULL) {
83 vm_exit_during_initialization("Could not allocate a young gen space");
84 }
85
86 // Allocate the mark sweep views of spaces
87 _eden_mark_sweep =
88 new PSMarkSweepDecorator(_eden_space, NULL, MarkSweepDeadRatio);
89 _from_mark_sweep =
90 new PSMarkSweepDecorator(_from_space, NULL, MarkSweepDeadRatio);
91 _to_mark_sweep =
92 new PSMarkSweepDecorator(_to_space, NULL, MarkSweepDeadRatio);
93
94 if (_eden_mark_sweep == NULL ||
95 _from_mark_sweep == NULL ||
96 _to_mark_sweep == NULL) {
97 vm_exit_during_initialization("Could not complete allocation"
98 " of the young generation");
99 }
100
101 // Generation Counters - generation 0, 3 subspaces
102 _gen_counters = new PSGenerationCounters("new", 0, 3, _min_gen_size,
103 _max_gen_size, _virtual_space);
104
105 // Compute maximum space sizes for performance counters
106 ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
107 size_t alignment = heap->space_alignment();
108 size_t size = virtual_space()->reserved_size();
109
110 size_t max_survivor_size;
111 size_t max_eden_size;
112
113 if (UseAdaptiveSizePolicy) {
114 max_survivor_size = size / MinSurvivorRatio;
115
116 // round the survivor space size down to the nearest alignment
117 // and make sure its size is greater than 0.
118 max_survivor_size = align_size_down(max_survivor_size, alignment);
119 max_survivor_size = MAX2(max_survivor_size, alignment);
120
121 // set the maximum size of eden to be the size of the young gen
122 // less two times the minimum survivor size. The minimum survivor
123 // size for UseAdaptiveSizePolicy is one alignment.
124 max_eden_size = size - 2 * alignment;
125 } else {
126 max_survivor_size = size / InitialSurvivorRatio;
136 // is dependent on the committed portion (current capacity) of the
137 // generation - the less space committed, the smaller the survivor
138 // space, possibly as small as an alignment. However, we are interested
139 // in the case where the young generation is 100% committed, as this
140 // is the point where eden reaches its maximum size. At this point,
141 // the size of a survivor space is max_survivor_size.
142 max_eden_size = size - 2 * max_survivor_size;
143 }
144
145 _eden_counters = new SpaceCounters("eden", 0, max_eden_size, _eden_space,
146 _gen_counters);
147 _from_counters = new SpaceCounters("s0", 1, max_survivor_size, _from_space,
148 _gen_counters);
149 _to_counters = new SpaceCounters("s1", 2, max_survivor_size, _to_space,
150 _gen_counters);
151
152 compute_initial_space_boundaries();
153 }
154
155 void PSYoungGen::compute_initial_space_boundaries() {
156 ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
157 assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
158
159 // Compute sizes
160 size_t alignment = heap->space_alignment();
161 size_t size = virtual_space()->committed_size();
162 assert(size >= 3 * alignment, "Young space is not large enough for eden + 2 survivors");
163
164 size_t survivor_size = size / InitialSurvivorRatio;
165 survivor_size = align_size_down(survivor_size, alignment);
166 // ... but never less than an alignment
167 survivor_size = MAX2(survivor_size, alignment);
168
169 // Young generation is eden + 2 survivor spaces
170 size_t eden_size = size - (2 * survivor_size);
171
172 // Now go ahead and set 'em.
173 set_space_boundaries(eden_size, survivor_size);
174 space_invariants();
175
176 if (UsePerfData) {
177 _eden_counters->update_capacity();
191 char *to_start = eden_start + eden_size;
192 char *from_start = to_start + survivor_size;
193 char *from_end = from_start + survivor_size;
194
195 assert(from_end == virtual_space()->high(), "just checking");
196 assert(is_object_aligned((intptr_t)eden_start), "checking alignment");
197 assert(is_object_aligned((intptr_t)to_start), "checking alignment");
198 assert(is_object_aligned((intptr_t)from_start), "checking alignment");
199
200 MemRegion eden_mr((HeapWord*)eden_start, (HeapWord*)to_start);
201 MemRegion to_mr ((HeapWord*)to_start, (HeapWord*)from_start);
202 MemRegion from_mr((HeapWord*)from_start, (HeapWord*)from_end);
203
204 eden_space()->initialize(eden_mr, true, ZapUnusedHeapArea);
205 to_space()->initialize(to_mr , true, ZapUnusedHeapArea);
206 from_space()->initialize(from_mr, true, ZapUnusedHeapArea);
207 }
208
209 #ifndef PRODUCT
210 void PSYoungGen::space_invariants() {
211 ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
212 const size_t alignment = heap->space_alignment();
213
214 // Currently, our eden size cannot shrink to zero
215 guarantee(eden_space()->capacity_in_bytes() >= alignment, "eden too small");
216 guarantee(from_space()->capacity_in_bytes() >= alignment, "from too small");
217 guarantee(to_space()->capacity_in_bytes() >= alignment, "to too small");
218
219 // Relationship of spaces to each other
220 char* eden_start = (char*)eden_space()->bottom();
221 char* eden_end = (char*)eden_space()->end();
222 char* from_start = (char*)from_space()->bottom();
223 char* from_end = (char*)from_space()->end();
224 char* to_start = (char*)to_space()->bottom();
225 char* to_end = (char*)to_space()->end();
226
227 guarantee(eden_start >= virtual_space()->low(), "eden bottom");
228 guarantee(eden_start < eden_end, "eden space consistency");
229 guarantee(from_start < from_end, "from space consistency");
230 guarantee(to_start < to_end, "to space consistency");
231
477 sizeof(char)));
478 }
479
480 // There's nothing to do if the new sizes are the same as the current
481 if (requested_survivor_size == to_space()->capacity_in_bytes() &&
482 requested_survivor_size == from_space()->capacity_in_bytes() &&
483 requested_eden_size == eden_space()->capacity_in_bytes()) {
484 if (PrintAdaptiveSizePolicy && Verbose) {
485 gclog_or_tty->print_cr(" capacities are the right sizes, returning");
486 }
487 return;
488 }
489
490 char* eden_start = (char*)eden_space()->bottom();
491 char* eden_end = (char*)eden_space()->end();
492 char* from_start = (char*)from_space()->bottom();
493 char* from_end = (char*)from_space()->end();
494 char* to_start = (char*)to_space()->bottom();
495 char* to_end = (char*)to_space()->end();
496
497 ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
498 const size_t alignment = heap->space_alignment();
499 const bool maintain_minimum =
500 (requested_eden_size + 2 * requested_survivor_size) <= min_gen_size();
501
502 bool eden_from_to_order = from_start < to_start;
503 // Check whether from space is below to space
504 if (eden_from_to_order) {
505 // Eden, from, to
506 eden_from_to_order = true;
507 if (PrintAdaptiveSizePolicy && Verbose) {
508 gclog_or_tty->print_cr(" Eden, from, to:");
509 }
510
511 // Set eden
512 // "requested_eden_size" is a goal for the size of eden
513 // and may not be attainable. "eden_size" below is
514 // calculated based on the location of from-space and
515 // the goal for the size of eden. from-space is
516 // fixed in place because it contains live data.
517 // The calculation is done this way to avoid 32bit
529 sizeof(char));
530 } else {
531 eden_size = MIN2(requested_eden_size,
532 pointer_delta(from_start, eden_start, sizeof(char)));
533 }
534
535 eden_end = eden_start + eden_size;
536 assert(eden_end >= eden_start, "addition overflowed");
537
538 // To may resize into from space as long as it is clear of live data.
539 // From space must remain page aligned, though, so we need to do some
540 // extra calculations.
541
542 // First calculate an optimal to-space
543 to_end = (char*)virtual_space()->high();
544 to_start = (char*)pointer_delta(to_end, (char*)requested_survivor_size,
545 sizeof(char));
546
547 // Does the optimal to-space overlap from-space?
548 if (to_start < (char*)from_space()->end()) {
549 assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
550
551 // Calculate the minimum offset possible for from_end
552 size_t from_size = pointer_delta(from_space()->top(), from_start, sizeof(char));
553
554 // Should we be in this method if from_space is empty? Why not the set_space method? FIX ME!
555 if (from_size == 0) {
556 from_size = alignment;
557 } else {
558 from_size = align_size_up(from_size, alignment);
559 }
560
561 from_end = from_start + from_size;
562 assert(from_end > from_start, "addition overflow or from_size problem");
563
564 guarantee(from_end <= (char*)from_space()->end(), "from_end moved to the right");
565
566 // Now update to_start with the new from_end
567 to_start = MAX2(from_end, to_start);
568 }
569
570 guarantee(to_start != to_end, "to space is zero sized");
691 HeapWord* limit = (HeapWord*) virtual_space()->high();
692 eden_space()->check_mangled_unused_area(limit);
693 from_space()->check_mangled_unused_area(limit);
694 to_space()->check_mangled_unused_area(limit);
695 }
696 // When an existing space is being initialized, it is not
697 // mangled because the space has been previously mangled.
698 eden_space()->initialize(edenMR,
699 SpaceDecorator::Clear,
700 SpaceDecorator::DontMangle);
701 to_space()->initialize(toMR,
702 SpaceDecorator::Clear,
703 SpaceDecorator::DontMangle);
704 from_space()->initialize(fromMR,
705 SpaceDecorator::DontClear,
706 SpaceDecorator::DontMangle);
707
708 assert(from_space()->top() == old_from_top, "from top changed!");
709
710 if (PrintAdaptiveSizePolicy) {
711 ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
712 assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
713
714 gclog_or_tty->print("AdaptiveSizePolicy::survivor space sizes: "
715 "collection: %d "
716 "(" SIZE_FORMAT ", " SIZE_FORMAT ") -> "
717 "(" SIZE_FORMAT ", " SIZE_FORMAT ") ",
718 heap->total_collections(),
719 old_from, old_to,
720 from_space()->capacity_in_bytes(),
721 to_space()->capacity_in_bytes());
722 gclog_or_tty->cr();
723 }
724 }
725
726 void PSYoungGen::swap_spaces() {
727 MutableSpace* s = from_space();
728 _from_space = to_space();
729 _to_space = s;
730
731 // Now update the decorators.
732 PSMarkSweepDecorator* md = from_mark_sweep();
733 _from_mark_sweep = to_mark_sweep();
// Unsupported for this generation: reaching this entry point is a bug
// (ShouldNotReachHere aborts in debug builds); the return is dead code.
826 size_t PSYoungGen::available_for_expansion() {
827 ShouldNotReachHere();
828 return 0;
829 }
830
// Unsupported for this generation: reaching this entry point is a bug
// (ShouldNotReachHere aborts in debug builds); the return is dead code.
831 size_t PSYoungGen::available_for_contraction() {
832 ShouldNotReachHere();
833 return 0;
834 }
835
// Bytes of committed space that could be given up before the
// generation would drop below its minimum size.
836 size_t PSYoungGen::available_to_min_gen() {
837 assert(virtual_space()->committed_size() >= min_gen_size(), "Invariant");
838 return virtual_space()->committed_size() - min_gen_size();
839 }
840
841 // This method assumes that from-space has live data and that
842 // any shrinkage of the young gen is limited by location of
843 // from-space.
844 size_t PSYoungGen::available_to_live() {
845 size_t delta_in_survivor = 0;
846 ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
847 const size_t space_alignment = heap->space_alignment();
848 const size_t gen_alignment = heap->generation_alignment();
849
850 MutableSpace* space_shrinking = NULL;
851 if (from_space()->end() > to_space()->end()) {
852 space_shrinking = from_space();
853 } else {
854 space_shrinking = to_space();
855 }
856
857 // Include any space that is committed but not included in
858 // the survivor spaces.
859 assert(((HeapWord*)virtual_space()->high()) >= space_shrinking->end(),
860 "Survivor space beyond high end");
861 size_t unused_committed = pointer_delta(virtual_space()->high(),
862 space_shrinking->end(), sizeof(char));
863
864 if (space_shrinking->is_empty()) {
865 // Don't let the space shrink to 0
866 assert(space_shrinking->capacity_in_bytes() >= space_alignment,
910 // Was there a shrink of the survivor space?
911 if (new_end < space_shrinking->end()) {
912 MemRegion mr(space_shrinking->bottom(), new_end);
913 space_shrinking->initialize(mr,
914 SpaceDecorator::DontClear,
915 SpaceDecorator::Mangle);
916 }
917 }
918
919 // This method currently does not expect to expand into eden (i.e.,
920 // the virtual space boundaries are expected to be consistent
921 // with the eden boundaries).
// After a resize: re-register the committed region with the barrier
// set and re-check the space layout invariants.
922 void PSYoungGen::post_resize() {
923 assert_locked_or_safepoint(Heap_lock);
924 assert((eden_space()->bottom() < to_space()->bottom()) &&
925 (eden_space()->bottom() < from_space()->bottom()),
926 "Eden is assumed to be below the survivor spaces");
927
928 MemRegion cmr((HeapWord*)virtual_space()->low(),
929 (HeapWord*)virtual_space()->high());
930 Universe::heap()->barrier_set()->resize_covered_region(cmr);
931 space_invariants();
932 }
933
934
935
// Refresh the space and generation performance counters; a no-op
// unless perf data is enabled.
936 void PSYoungGen::update_counters() {
937 if (UsePerfData) {
938 _eden_counters->update_all();
939 _from_counters->update_all();
940 _to_counters->update_all();
941 _gen_counters->update_all();
942 }
943 }
944
// Verify eden and both survivor spaces.
945 void PSYoungGen::verify() {
946 eden_space()->verify();
947 from_space()->verify();
948 to_space()->verify();
949 }
950
|
45 assert(_init_gen_size != 0, "Should have a finite size");
46 _virtual_space = new PSVirtualSpace(rs, alignment);
47 if (!virtual_space()->expand_by(_init_gen_size)) {
48 vm_exit_during_initialization("Could not reserve enough space for "
49 "object heap");
50 }
51 }
52
// Set up the generation: reserve/commit the backing virtual space,
// then perform the one-time space and counter initialization.
53 void PSYoungGen::initialize(ReservedSpace rs, size_t alignment) {
54 initialize_virtual_space(rs, alignment);
55 initialize_work();
56 }
57
// One-time initialization of the young generation over the committed
// virtual space: records the reserved region, registers the committed
// region with the barrier set, allocates the eden/from/to spaces and
// their mark-sweep decorators, creates the performance counters, and
// finally computes the initial eden/survivor boundaries.
58 void PSYoungGen::initialize_work() {
59
60 _reserved = MemRegion((HeapWord*)virtual_space()->low_boundary(),
61 (HeapWord*)virtual_space()->high_boundary());
62
63 MemRegion cmr((HeapWord*)virtual_space()->low(),
64 (HeapWord*)virtual_space()->high());
65 ParallelScavengeHeap::heap()->barrier_set()->resize_covered_region(cmr);
66
67 if (ZapUnusedHeapArea) {
68 // Mangle newly committed space immediately because it
69 // can be done here more simply than after the new
70 // spaces have been computed.
71 SpaceMangler::mangle_region(cmr);
72 }
73
// Eden is NUMA-aware when requested; the survivor spaces are plain
// mutable spaces in either case.
74 if (UseNUMA) {
75 _eden_space = new MutableNUMASpace(virtual_space()->alignment());
76 } else {
77 _eden_space = new MutableSpace(virtual_space()->alignment());
78 }
79 _from_space = new MutableSpace(virtual_space()->alignment());
80 _to_space = new MutableSpace(virtual_space()->alignment());
81
82 if (_eden_space == NULL || _from_space == NULL || _to_space == NULL) {
83 vm_exit_during_initialization("Could not allocate a young gen space");
84 }
85
86 // Allocate the mark sweep views of spaces
87 _eden_mark_sweep =
88 new PSMarkSweepDecorator(_eden_space, NULL, MarkSweepDeadRatio);
89 _from_mark_sweep =
90 new PSMarkSweepDecorator(_from_space, NULL, MarkSweepDeadRatio);
91 _to_mark_sweep =
92 new PSMarkSweepDecorator(_to_space, NULL, MarkSweepDeadRatio);
93
94 if (_eden_mark_sweep == NULL ||
95 _from_mark_sweep == NULL ||
96 _to_mark_sweep == NULL) {
97 vm_exit_during_initialization("Could not complete allocation"
98 " of the young generation");
99 }
100
101 // Generation Counters - generation 0, 3 subspaces
102 _gen_counters = new PSGenerationCounters("new", 0, 3, _min_gen_size,
103 _max_gen_size, _virtual_space);
104
105 // Compute maximum space sizes for performance counters
106 ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
107 size_t alignment = heap->space_alignment();
108 size_t size = virtual_space()->reserved_size();
109
110 size_t max_survivor_size;
111 size_t max_eden_size;
112
113 if (UseAdaptiveSizePolicy) {
114 max_survivor_size = size / MinSurvivorRatio;
115
116 // round the survivor space size down to the nearest alignment
117 // and make sure its size is greater than 0.
118 max_survivor_size = align_size_down(max_survivor_size, alignment);
119 max_survivor_size = MAX2(max_survivor_size, alignment);
120
121 // set the maximum size of eden to be the size of the young gen
122 // less two times the minimum survivor size. The minimum survivor
123 // size for UseAdaptiveSizePolicy is one alignment.
124 max_eden_size = size - 2 * alignment;
125 } else {
126 max_survivor_size = size / InitialSurvivorRatio;
136 // is dependent on the committed portion (current capacity) of the
137 // generation - the less space committed, the smaller the survivor
138 // space, possibly as small as an alignment. However, we are interested
139 // in the case where the young generation is 100% committed, as this
140 // is the point where eden reaches its maximum size. At this point,
141 // the size of a survivor space is max_survivor_size.
142 max_eden_size = size - 2 * max_survivor_size;
143 }
144
145 _eden_counters = new SpaceCounters("eden", 0, max_eden_size, _eden_space,
146 _gen_counters);
147 _from_counters = new SpaceCounters("s0", 1, max_survivor_size, _from_space,
148 _gen_counters);
149 _to_counters = new SpaceCounters("s1", 2, max_survivor_size, _to_space,
150 _gen_counters);
151
152 compute_initial_space_boundaries();
153 }
154
155 void PSYoungGen::compute_initial_space_boundaries() {
156 ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
157
158 // Compute sizes
159 size_t alignment = heap->space_alignment();
160 size_t size = virtual_space()->committed_size();
161 assert(size >= 3 * alignment, "Young space is not large enough for eden + 2 survivors");
162
163 size_t survivor_size = size / InitialSurvivorRatio;
164 survivor_size = align_size_down(survivor_size, alignment);
165 // ... but never less than an alignment
166 survivor_size = MAX2(survivor_size, alignment);
167
168 // Young generation is eden + 2 survivor spaces
169 size_t eden_size = size - (2 * survivor_size);
170
171 // Now go ahead and set 'em.
172 set_space_boundaries(eden_size, survivor_size);
173 space_invariants();
174
175 if (UsePerfData) {
176 _eden_counters->update_capacity();
190 char *to_start = eden_start + eden_size;
191 char *from_start = to_start + survivor_size;
192 char *from_end = from_start + survivor_size;
193
194 assert(from_end == virtual_space()->high(), "just checking");
195 assert(is_object_aligned((intptr_t)eden_start), "checking alignment");
196 assert(is_object_aligned((intptr_t)to_start), "checking alignment");
197 assert(is_object_aligned((intptr_t)from_start), "checking alignment");
198
199 MemRegion eden_mr((HeapWord*)eden_start, (HeapWord*)to_start);
200 MemRegion to_mr ((HeapWord*)to_start, (HeapWord*)from_start);
201 MemRegion from_mr((HeapWord*)from_start, (HeapWord*)from_end);
202
203 eden_space()->initialize(eden_mr, true, ZapUnusedHeapArea);
204 to_space()->initialize(to_mr , true, ZapUnusedHeapArea);
205 from_space()->initialize(from_mr, true, ZapUnusedHeapArea);
206 }
207
208 #ifndef PRODUCT
209 void PSYoungGen::space_invariants() {
210 ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
211 const size_t alignment = heap->space_alignment();
212
213 // Currently, our eden size cannot shrink to zero
214 guarantee(eden_space()->capacity_in_bytes() >= alignment, "eden too small");
215 guarantee(from_space()->capacity_in_bytes() >= alignment, "from too small");
216 guarantee(to_space()->capacity_in_bytes() >= alignment, "to too small");
217
218 // Relationship of spaces to each other
219 char* eden_start = (char*)eden_space()->bottom();
220 char* eden_end = (char*)eden_space()->end();
221 char* from_start = (char*)from_space()->bottom();
222 char* from_end = (char*)from_space()->end();
223 char* to_start = (char*)to_space()->bottom();
224 char* to_end = (char*)to_space()->end();
225
226 guarantee(eden_start >= virtual_space()->low(), "eden bottom");
227 guarantee(eden_start < eden_end, "eden space consistency");
228 guarantee(from_start < from_end, "from space consistency");
229 guarantee(to_start < to_end, "to space consistency");
230
476 sizeof(char)));
477 }
478
479 // There's nothing to do if the new sizes are the same as the current
480 if (requested_survivor_size == to_space()->capacity_in_bytes() &&
481 requested_survivor_size == from_space()->capacity_in_bytes() &&
482 requested_eden_size == eden_space()->capacity_in_bytes()) {
483 if (PrintAdaptiveSizePolicy && Verbose) {
484 gclog_or_tty->print_cr(" capacities are the right sizes, returning");
485 }
486 return;
487 }
488
489 char* eden_start = (char*)eden_space()->bottom();
490 char* eden_end = (char*)eden_space()->end();
491 char* from_start = (char*)from_space()->bottom();
492 char* from_end = (char*)from_space()->end();
493 char* to_start = (char*)to_space()->bottom();
494 char* to_end = (char*)to_space()->end();
495
496 ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
497 const size_t alignment = heap->space_alignment();
498 const bool maintain_minimum =
499 (requested_eden_size + 2 * requested_survivor_size) <= min_gen_size();
500
501 bool eden_from_to_order = from_start < to_start;
502 // Check whether from space is below to space
503 if (eden_from_to_order) {
504 // Eden, from, to
505 eden_from_to_order = true;
506 if (PrintAdaptiveSizePolicy && Verbose) {
507 gclog_or_tty->print_cr(" Eden, from, to:");
508 }
509
510 // Set eden
511 // "requested_eden_size" is a goal for the size of eden
512 // and may not be attainable. "eden_size" below is
513 // calculated based on the location of from-space and
514 // the goal for the size of eden. from-space is
515 // fixed in place because it contains live data.
516 // The calculation is done this way to avoid 32bit
528 sizeof(char));
529 } else {
530 eden_size = MIN2(requested_eden_size,
531 pointer_delta(from_start, eden_start, sizeof(char)));
532 }
533
534 eden_end = eden_start + eden_size;
535 assert(eden_end >= eden_start, "addition overflowed");
536
537 // To may resize into from space as long as it is clear of live data.
538 // From space must remain page aligned, though, so we need to do some
539 // extra calculations.
540
541 // First calculate an optimal to-space
542 to_end = (char*)virtual_space()->high();
543 to_start = (char*)pointer_delta(to_end, (char*)requested_survivor_size,
544 sizeof(char));
545
546 // Does the optimal to-space overlap from-space?
547 if (to_start < (char*)from_space()->end()) {
548 // Calculate the minimum offset possible for from_end
549 size_t from_size = pointer_delta(from_space()->top(), from_start, sizeof(char));
550
551 // Should we be in this method if from_space is empty? Why not the set_space method? FIX ME!
552 if (from_size == 0) {
553 from_size = alignment;
554 } else {
555 from_size = align_size_up(from_size, alignment);
556 }
557
558 from_end = from_start + from_size;
559 assert(from_end > from_start, "addition overflow or from_size problem");
560
561 guarantee(from_end <= (char*)from_space()->end(), "from_end moved to the right");
562
563 // Now update to_start with the new from_end
564 to_start = MAX2(from_end, to_start);
565 }
566
567 guarantee(to_start != to_end, "to space is zero sized");
688 HeapWord* limit = (HeapWord*) virtual_space()->high();
689 eden_space()->check_mangled_unused_area(limit);
690 from_space()->check_mangled_unused_area(limit);
691 to_space()->check_mangled_unused_area(limit);
692 }
693 // When an existing space is being initialized, it is not
694 // mangled because the space has been previously mangled.
695 eden_space()->initialize(edenMR,
696 SpaceDecorator::Clear,
697 SpaceDecorator::DontMangle);
698 to_space()->initialize(toMR,
699 SpaceDecorator::Clear,
700 SpaceDecorator::DontMangle);
701 from_space()->initialize(fromMR,
702 SpaceDecorator::DontClear,
703 SpaceDecorator::DontMangle);
704
705 assert(from_space()->top() == old_from_top, "from top changed!");
706
707 if (PrintAdaptiveSizePolicy) {
708 ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
709 gclog_or_tty->print("AdaptiveSizePolicy::survivor space sizes: "
710 "collection: %d "
711 "(" SIZE_FORMAT ", " SIZE_FORMAT ") -> "
712 "(" SIZE_FORMAT ", " SIZE_FORMAT ") ",
713 heap->total_collections(),
714 old_from, old_to,
715 from_space()->capacity_in_bytes(),
716 to_space()->capacity_in_bytes());
717 gclog_or_tty->cr();
718 }
719 }
720
721 void PSYoungGen::swap_spaces() {
722 MutableSpace* s = from_space();
723 _from_space = to_space();
724 _to_space = s;
725
726 // Now update the decorators.
727 PSMarkSweepDecorator* md = from_mark_sweep();
728 _from_mark_sweep = to_mark_sweep();
// Unsupported for this generation: reaching this entry point is a bug
// (ShouldNotReachHere aborts in debug builds); the return is dead code.
821 size_t PSYoungGen::available_for_expansion() {
822 ShouldNotReachHere();
823 return 0;
824 }
825
// Unsupported for this generation: reaching this entry point is a bug
// (ShouldNotReachHere aborts in debug builds); the return is dead code.
826 size_t PSYoungGen::available_for_contraction() {
827 ShouldNotReachHere();
828 return 0;
829 }
830
// Bytes of committed space that could be given up before the
// generation would drop below its minimum size.
831 size_t PSYoungGen::available_to_min_gen() {
832 assert(virtual_space()->committed_size() >= min_gen_size(), "Invariant");
833 return virtual_space()->committed_size() - min_gen_size();
834 }
835
836 // This method assumes that from-space has live data and that
837 // any shrinkage of the young gen is limited by location of
838 // from-space.
839 size_t PSYoungGen::available_to_live() {
840 size_t delta_in_survivor = 0;
841 ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
842 const size_t space_alignment = heap->space_alignment();
843 const size_t gen_alignment = heap->generation_alignment();
844
845 MutableSpace* space_shrinking = NULL;
846 if (from_space()->end() > to_space()->end()) {
847 space_shrinking = from_space();
848 } else {
849 space_shrinking = to_space();
850 }
851
852 // Include any space that is committed but not included in
853 // the survivor spaces.
854 assert(((HeapWord*)virtual_space()->high()) >= space_shrinking->end(),
855 "Survivor space beyond high end");
856 size_t unused_committed = pointer_delta(virtual_space()->high(),
857 space_shrinking->end(), sizeof(char));
858
859 if (space_shrinking->is_empty()) {
860 // Don't let the space shrink to 0
861 assert(space_shrinking->capacity_in_bytes() >= space_alignment,
905 // Was there a shrink of the survivor space?
906 if (new_end < space_shrinking->end()) {
907 MemRegion mr(space_shrinking->bottom(), new_end);
908 space_shrinking->initialize(mr,
909 SpaceDecorator::DontClear,
910 SpaceDecorator::Mangle);
911 }
912 }
913
914 // This method currently does not expect to expand into eden (i.e.,
915 // the virtual space boundaries are expected to be consistent
916 // with the eden boundaries).
// After a resize: re-register the committed region with the barrier
// set and re-check the space layout invariants.
917 void PSYoungGen::post_resize() {
918 assert_locked_or_safepoint(Heap_lock);
919 assert((eden_space()->bottom() < to_space()->bottom()) &&
920 (eden_space()->bottom() < from_space()->bottom()),
921 "Eden is assumed to be below the survivor spaces");
922
923 MemRegion cmr((HeapWord*)virtual_space()->low(),
924 (HeapWord*)virtual_space()->high());
925 ParallelScavengeHeap::heap()->barrier_set()->resize_covered_region(cmr);
926 space_invariants();
927 }
928
929
930
// Refresh the space and generation performance counters; a no-op
// unless perf data is enabled.
931 void PSYoungGen::update_counters() {
932 if (UsePerfData) {
933 _eden_counters->update_all();
934 _from_counters->update_all();
935 _to_counters->update_all();
936 _gen_counters->update_all();
937 }
938 }
939
// Verify eden and both survivor spaces.
940 void PSYoungGen::verify() {
941 eden_space()->verify();
942 from_space()->verify();
943 to_space()->verify();
944 }
945
|