Print this page
Split |
Close |
Expand all |
Collapse all |
--- old/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.inline.hpp
+++ new/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.inline.hpp
1 1 /*
2 - * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
2 + * Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved.
3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 4 *
5 5 * This code is free software; you can redistribute it and/or modify it
6 6 * under the terms of the GNU General Public License version 2 only, as
7 7 * published by the Free Software Foundation.
8 8 *
9 9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 12 * version 2 for more details (a copy is included in the LICENSE file that
13 13 * accompanied this code).
14 14 *
15 15 * You should have received a copy of the GNU General Public License version
16 16 * 2 along with this work; if not, write to the Free Software Foundation,
17 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 18 *
19 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 20 * or visit www.oracle.com if you need additional information or have any
21 21 * questions.
22 22 *
23 23 */
24 24
25 25 #ifndef SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSPROMOTIONMANAGER_INLINE_HPP
26 26 #define SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSPROMOTIONMANAGER_INLINE_HPP
27 27
28 28 #include "gc_implementation/parallelScavenge/psPromotionManager.hpp"
29 29 #include "gc_implementation/parallelScavenge/psScavenge.hpp"
30 30
31 31 inline PSPromotionManager* PSPromotionManager::manager_array(int index) {
32 32 assert(_manager_array != NULL, "access of NULL manager_array");
33 33 assert(index >= 0 && index <= (int)ParallelGCThreads, "out of range manager_array access");
34 34 return _manager_array[index];
35 35 }
36 36
37 37 template <class T>
38 38 inline void PSPromotionManager::claim_or_forward_internal_depth(T* p) {
39 39 if (p != NULL) { // XXX: error if p != NULL here
40 40 oop o = oopDesc::load_decode_heap_oop_not_null(p);
41 41 if (o->is_forwarded()) {
42 42 o = o->forwardee();
43 43 // Card mark
44 44 if (PSScavenge::is_obj_in_young((HeapWord*) o)) {
45 45 PSScavenge::card_table()->inline_write_ref_field_gc(p, o);
46 46 }
47 47 oopDesc::encode_store_heap_oop_not_null(p, o);
48 48 } else {
49 49 push_depth(p);
50 50 }
51 51 }
52 52 }
53 53
↓ open down ↓ |
41 lines elided |
↑ open up ↑ |
54 54 template <class T>
55 55 inline void PSPromotionManager::claim_or_forward_depth(T* p) {
56 56 assert(PSScavenge::should_scavenge(p, true), "revisiting object?");
57 57 assert(Universe::heap()->kind() == CollectedHeap::ParallelScavengeHeap,
58 58 "Sanity");
59 59 assert(Universe::heap()->is_in(p), "pointer outside heap");
60 60
61 61 claim_or_forward_internal_depth(p);
62 62 }
63 63
64 +//
65 +// This method is pretty bulky. It would be nice to split it up
66 +// into smaller submethods, but we need to be careful not to hurt
67 +// performance.
68 +//
69 +template<bool promote_immediately>
70 +oop PSPromotionManager::copy_to_survivor_space(oop o) {
71 + assert(PSScavenge::should_scavenge(&o), "Sanity");
72 +
73 + oop new_obj = NULL;
74 +
75 + // NOTE! We must be very careful with any methods that access the mark
76 + // in o. There may be multiple threads racing on it, and it may be forwarded
77 + // at any time. Do not use oop methods for accessing the mark!
78 + markOop test_mark = o->mark();
79 +
80 + // The same test as "o->is_forwarded()"
81 + if (!test_mark->is_marked()) {
82 + bool new_obj_is_tenured = false;
83 + size_t new_obj_size = o->size();
84 +
85 + if (!promote_immediately) {
 86 + // Find the object's age, MT safe.
87 + int age = (test_mark->has_displaced_mark_helper() /* o->has_displaced_mark() */) ?
88 + test_mark->displaced_mark_helper()->age() : test_mark->age();
89 +
90 + // Try allocating obj in to-space (unless too old)
91 + if (age < PSScavenge::tenuring_threshold()) {
92 + new_obj = (oop) _young_lab.allocate(new_obj_size);
93 + if (new_obj == NULL && !_young_gen_is_full) {
94 + // Do we allocate directly, or flush and refill?
95 + if (new_obj_size > (YoungPLABSize / 2)) {
96 + // Allocate this object directly
97 + new_obj = (oop)young_space()->cas_allocate(new_obj_size);
98 + } else {
99 + // Flush and fill
100 + _young_lab.flush();
101 +
102 + HeapWord* lab_base = young_space()->cas_allocate(YoungPLABSize);
103 + if (lab_base != NULL) {
104 + _young_lab.initialize(MemRegion(lab_base, YoungPLABSize));
105 + // Try the young lab allocation again.
106 + new_obj = (oop) _young_lab.allocate(new_obj_size);
107 + } else {
108 + _young_gen_is_full = true;
109 + }
110 + }
111 + }
112 + }
113 + }
114 +
115 + // Otherwise try allocating obj tenured
116 + if (new_obj == NULL) {
117 +#ifndef PRODUCT
118 + if (Universe::heap()->promotion_should_fail()) {
119 + return oop_promotion_failed(o, test_mark);
120 + }
121 +#endif // #ifndef PRODUCT
122 +
123 + new_obj = (oop) _old_lab.allocate(new_obj_size);
124 + new_obj_is_tenured = true;
125 +
126 + if (new_obj == NULL) {
127 + if (!_old_gen_is_full) {
128 + // Do we allocate directly, or flush and refill?
129 + if (new_obj_size > (OldPLABSize / 2)) {
130 + // Allocate this object directly
131 + new_obj = (oop)old_gen()->cas_allocate(new_obj_size);
132 + } else {
133 + // Flush and fill
134 + _old_lab.flush();
135 +
136 + HeapWord* lab_base = old_gen()->cas_allocate(OldPLABSize);
137 + if(lab_base != NULL) {
138 + _old_lab.initialize(MemRegion(lab_base, OldPLABSize));
139 + // Try the old lab allocation again.
140 + new_obj = (oop) _old_lab.allocate(new_obj_size);
141 + }
142 + }
143 + }
144 +
145 + // This is the promotion failed test, and code handling.
146 + // The code belongs here for two reasons. It is slightly
 147 + // different than the code below, and cannot share the
148 + // CAS testing code. Keeping the code here also minimizes
149 + // the impact on the common case fast path code.
150 +
151 + if (new_obj == NULL) {
152 + _old_gen_is_full = true;
153 + return oop_promotion_failed(o, test_mark);
154 + }
155 + }
156 + }
157 +
158 + assert(new_obj != NULL, "allocation should have succeeded");
159 +
160 + // Copy obj
161 + Copy::aligned_disjoint_words((HeapWord*)o, (HeapWord*)new_obj, new_obj_size);
162 +
163 + // Now we have to CAS in the header.
164 + if (o->cas_forward_to(new_obj, test_mark)) {
165 + // We won any races, we "own" this object.
166 + assert(new_obj == o->forwardee(), "Sanity");
167 +
168 + // Increment age if obj still in new generation. Now that
169 + // we're dealing with a markOop that cannot change, it is
170 + // okay to use the non mt safe oop methods.
171 + if (!new_obj_is_tenured) {
172 + new_obj->incr_age();
173 + assert(young_space()->contains(new_obj), "Attempt to push non-promoted obj");
174 + }
175 +
176 + // Do the size comparison first with new_obj_size, which we
177 + // already have. Hopefully, only a few objects are larger than
178 + // _min_array_size_for_chunking, and most of them will be arrays.
 179 + // So, the is_objArray() test would be very infrequent.
180 + if (new_obj_size > _min_array_size_for_chunking &&
181 + new_obj->is_objArray() &&
182 + PSChunkLargeArrays) {
183 + // we'll chunk it
184 + oop* const masked_o = mask_chunked_array_oop(o);
185 + push_depth(masked_o);
186 + TASKQUEUE_STATS_ONLY(++_arrays_chunked; ++_masked_pushes);
187 + } else {
188 + // we'll just push its contents
189 + new_obj->push_contents(this);
190 + }
191 + } else {
192 + // We lost, someone else "owns" this object
193 + guarantee(o->is_forwarded(), "Object must be forwarded if the cas failed.");
194 +
195 + // Try to deallocate the space. If it was directly allocated we cannot
196 + // deallocate it, so we have to test. If the deallocation fails,
197 + // overwrite with a filler object.
198 + if (new_obj_is_tenured) {
199 + if (!_old_lab.unallocate_object((HeapWord*) new_obj, new_obj_size)) {
200 + CollectedHeap::fill_with_object((HeapWord*) new_obj, new_obj_size);
201 + }
202 + } else if (!_young_lab.unallocate_object((HeapWord*) new_obj, new_obj_size)) {
203 + CollectedHeap::fill_with_object((HeapWord*) new_obj, new_obj_size);
204 + }
205 +
206 + // don't update this before the unallocation!
207 + new_obj = o->forwardee();
208 + }
209 + } else {
210 + assert(o->is_forwarded(), "Sanity");
211 + new_obj = o->forwardee();
212 + }
213 +
214 +#ifdef DEBUG
215 + // This code must come after the CAS test, or it will print incorrect
216 + // information.
217 + if (TraceScavenge) {
218 + gclog_or_tty->print_cr("{%s %s " PTR_FORMAT " -> " PTR_FORMAT " (" SIZE_FORMAT ")}",
219 + PSScavenge::should_scavenge(&new_obj) ? "copying" : "tenuring",
220 + new_obj->blueprint()->internal_name(), o, new_obj, new_obj->size());
221 + }
222 +#endif
223 +
224 + return new_obj;
225 +}
226 +
227 +
64 228 inline void PSPromotionManager::process_popped_location_depth(StarTask p) {
65 229 if (is_oop_masked(p)) {
66 230 assert(PSChunkLargeArrays, "invariant");
67 231 oop const old = unmask_chunked_array_oop(p);
68 232 process_array_chunk(old);
69 233 } else {
70 234 if (p.is_narrow()) {
71 235 assert(UseCompressedOops, "Error");
72 - PSScavenge::copy_and_push_safe_barrier(this, (narrowOop*)p);
236 + PSScavenge::copy_and_push_safe_barrier<narrowOop, /*promote_immediately=*/false>(this, p);
73 237 } else {
74 - PSScavenge::copy_and_push_safe_barrier(this, (oop*)p);
238 + PSScavenge::copy_and_push_safe_barrier<oop, /*promote_immediately=*/false>(this, p);
75 239 }
76 240 }
77 241 }
78 242
79 243 #if TASKQUEUE_STATS
80 244 void PSPromotionManager::record_steal(StarTask& p) {
81 245 if (is_oop_masked(p)) {
82 246 ++_masked_steals;
83 247 }
84 248 }
85 249 #endif // TASKQUEUE_STATS
86 250
87 251 #endif // SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSPROMOTIONMANAGER_INLINE_HPP
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX