1 #ifdef USE_PRAGMA_IDENT_HDR
2 #pragma ident "@(#)psPromotionManager.hpp 1.20 07/09/25 16:47:42 JVM"
3 #endif
4 /*
5 * Copyright 2002-2007 Sun Microsystems, Inc. All Rights Reserved.
6 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
7 *
8 * This code is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License version 2 only, as
10 * published by the Free Software Foundation.
11 *
12 * This code is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 * version 2 for more details (a copy is included in the LICENSE file that
16 * accompanied this code).
17 *
18 * You should have received a copy of the GNU General Public License version
19 * 2 along with this work; if not, write to the Free Software Foundation,
20 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
21 *
22 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
23 * CA 95054 USA or visit www.sun.com if you need additional information or
24 * have any questions.
25 *
26 */
28 //
29 // psPromotionManager is used by a single thread to manage object survival
30 // during a scavenge. The promotion manager contains thread local data only.
31 //
32 // NOTE! Be careful when allocating the stacks on the C heap. If you are
33 // going to use a promotion manager in more than one thread, the stacks
34 // MUST be on the C heap. This can lead to memory leaks, though, as they
35 // are not automatically deallocated.
36 //
37 // FIX ME FIX ME Add a destructor, and don't rely on the user to drain/flush/deallocate!
38 //
39
40 // Move to some global location
41 #define HAS_BEEN_MOVED 0x1501d01d
42 // End move to some global location
43
44 class MutableSpace;
45 class PSOldGen;
46 class ParCompactionManager;
47
48 #define PS_CHUNKED_ARRAY_OOP_MASK 1
49
50 #define PS_PM_STATS 0
51
52 class PSPromotionManager : public CHeapObj {
53  friend class PSScavenge;
54  friend class PSRefProcTaskExecutor;
55  private:
  // Scavenge-wide static state. One PSPromotionManager per GC worker is
  // held in _manager_array; the two task-queue sets back the depth-first
  // (oop*) and breadth-first (oop) traversal modes and support work
  // stealing between workers (see increment_steals below).
56   static PSPromotionManager** _manager_array;
57   static OopStarTaskQueueSet* _stack_array_depth;
58   static OopTaskQueueSet* _stack_array_breadth;
59   static PSOldGen* _old_gen;
60   static MutableSpace* _young_space;
61
  // Optional per-manager instrumentation counters; compiled out in this
  // configuration because PS_PM_STATS is #defined to 0 earlier in the file.
62 #if PS_PM_STATS
63   uint _total_pushes;
64   uint _masked_pushes;
65
66   uint _overflow_pushes;
67   uint _max_overflow_length;
68
69   uint _arrays_chunked;
70   uint _array_chunks_processed;
71
72   uint _total_steals;
73   uint _masked_steals;
74
75   void print_stats(uint i);
76   static void print_stats();
77 #endif // PS_PM_STATS
78
  // Promotion LABs for the young and old generations, plus flags that
  // record that allocation in the corresponding generation has failed.
79   PSYoungPromotionLAB _young_lab;
80   PSOldPromotionLAB _old_lab;
81   bool _young_gen_is_full;
82   bool _old_gen_is_full;
83   PrefetchQueue _prefetch_queue;
84
  // The claimed stacks are fixed-capacity task queues; when a push on one
  // fails, the entry spills into the matching unbounded overflow
  // GrowableArray (see push_depth below).
85   OopStarTaskQueue _claimed_stack_depth;
86   GrowableArray<oop*>* _overflow_stack_depth;
87   OopTaskQueue _claimed_stack_breadth;
88   GrowableArray<oop>* _overflow_stack_breadth;
89
90   bool _depth_first;
91   bool _totally_drain;
92   uint _target_stack_size;
93
  // Parameters controlling chunked scanning of large object arrays
  // (used by process_array_chunk and the masking helpers below).
94   uint _array_chunk_size;
95   uint _min_array_size_for_chunking;
96
97   // Accessors
98   static PSOldGen* old_gen() { return _old_gen; }
99   static MutableSpace* young_space() { return _young_space; }
100
101   inline static PSPromotionManager* manager_array(int index);
102
103   GrowableArray<oop*>* overflow_stack_depth() { return _overflow_stack_depth; }
104   GrowableArray<oop>* overflow_stack_breadth() { return _overflow_stack_breadth; }
105
106   // On the task queues we push reference locations as well as
107   // partially-scanned arrays (in the latter case, we push an oop to
108   // the from-space image of the array and the length on the
109   // from-space image indicates how many entries on the array we still
110   // need to scan; this is basically how ParNew does partial array
111   // scanning too). To be able to distinguish between reference
112   // locations and partially-scanned array oops we simply mask the
113   // latter oops with 0x01. The next three methods do the masking,
114   // unmasking, and checking whether the oop is masked or not. Notice
115   // that the signature of the mask and unmask methods looks a bit
116   // strange, as they accept and return different types (oop and
117   // oop*). This is because of the difference in types between what
118   // the task queue holds (oop*) and oops to partially-scanned arrays
119   // (oop). We do all the necessary casting in the mask / unmask
120   // methods to avoid sprinkling the rest of the code with more casts.
121
  // True iff p carries the PS_CHUNKED_ARRAY_OOP_MASK tag bit, i.e. it is
  // really a masked partially-scanned array oop rather than a plain oop*.
122   bool is_oop_masked(oop* p) {
123     return ((intptr_t) p & PS_CHUNKED_ARRAY_OOP_MASK) == PS_CHUNKED_ARRAY_OOP_MASK;
124   }
125
  // Tag a partially-scanned array oop so it can travel on the oop* queue.
126   oop* mask_chunked_array_oop(oop obj) {
127     assert(!is_oop_masked((oop*) obj), "invariant");
128     oop* ret = (oop*) ((intptr_t) obj | PS_CHUNKED_ARRAY_OOP_MASK);
129     assert(is_oop_masked(ret), "invariant");
130     return ret;
131   }
132
  // Inverse of mask_chunked_array_oop: strip the tag bit and recover the
  // array oop.
133   oop unmask_chunked_array_oop(oop* p) {
134     assert(is_oop_masked(p), "invariant");
135     oop ret = oop((intptr_t) p & ~PS_CHUNKED_ARRAY_OOP_MASK);
136     assert(!is_oop_masked((oop*) ret), "invariant");
137     return ret;
138   }
139
  // Scan one chunk of a partially-scanned array; defined out of line.
140   void process_array_chunk(oop old);
141
  // Push a reference location for depth-first processing, spilling to the
  // unbounded overflow stack when the claimed task queue is full.
142   void push_depth(oop* p) {
143     assert(depth_first(), "pre-condition");
144
145 #if PS_PM_STATS
146     ++_total_pushes;
147 #endif // PS_PM_STATS
148
149     if (!claimed_stack_depth()->push(p)) {
150       overflow_stack_depth()->push(p);
151 #if PS_PM_STATS
152       ++_overflow_pushes;
153       uint stack_length = (uint) overflow_stack_depth()->length();
154       if (stack_length > _max_overflow_length) {
155         _max_overflow_length = stack_length;
156       }
157 #endif // PS_PM_STATS
158     }
159   }
160
  // Push an object for breadth-first processing.
  // NOTE(review): the listing jumps from line 162 to line 213 here; the
  // remainder of push_breadth and the declarations on the intervening
  // lines are not visible in this excerpt.
161   void push_breadth(oop o) {
162     assert(!depth_first(), "pre-condition");
213
214   bool young_gen_is_full() { return _young_gen_is_full; }
215
216   bool old_gen_is_full() { return _old_gen_is_full; }
217   void set_old_gen_is_full(bool state) { _old_gen_is_full = state; }
218
219   // Promotion methods
  // Both defined out of line; presumably copy_to_survivor_space relocates
  // a live object (depth_first selects the traversal discipline) and
  // oop_promotion_failed handles the object when promotion is impossible
  // — confirm against psPromotionManager.cpp.
220   oop copy_to_survivor_space(oop o, bool depth_first);
221   oop oop_promotion_failed(oop obj, markOop obj_mark);
222
223   void reset();
224
225   void flush_labs();
  // Drain whichever claimed stack matches the current traversal mode;
  // totally_drain is forwarded to the mode-specific drain routine.
226   void drain_stacks(bool totally_drain) {
227     if (depth_first()) {
228       drain_stacks_depth(totally_drain);
229     } else {
230       drain_stacks_breadth(totally_drain);
231     }
232   }
  // Partially drain the depth stack, but only once it has grown past the
  // configured target size.
233   void drain_stacks_cond_depth() {
234     if (claimed_stack_depth()->size() > _target_stack_size) {
235       drain_stacks_depth(false);
236     }
237   }
238   void drain_stacks_depth(bool totally_drain);
239   void drain_stacks_breadth(bool totally_drain);
240
  // Emptiness tests consult only the stack pair for the active mode.
241   bool claimed_stack_empty() {
242     if (depth_first()) {
243       return claimed_stack_depth()->size() <= 0;
244     } else {
245       return claimed_stack_breadth()->size() <= 0;
246     }
247   }
248   bool overflow_stack_empty() {
249     if (depth_first()) {
250       return overflow_stack_depth()->length() <= 0;
251     } else {
252       return overflow_stack_breadth()->length() <= 0;
253     }
254   }
255   bool stacks_empty() {
256     return claimed_stack_empty() && overflow_stack_empty();
257   }
258   bool depth_first() {
259     return _depth_first;
260   }
261
262   inline void process_popped_location_depth(oop* p);
263
264   inline void flush_prefetch_queue();
265
266   inline void claim_or_forward_depth(oop* p);
267   inline void claim_or_forward_internal_depth(oop* p);
268
269   inline void claim_or_forward_breadth(oop* p);
270   inline void claim_or_forward_internal_breadth(oop* p);
271
272 #if PS_PM_STATS
  // Bump steal counters; p, when given, is the stolen queue entry and may
  // be a masked (partially-scanned array) oop.
273   void increment_steals(oop* p = NULL) {
274     _total_steals += 1;
275     if (p != NULL && is_oop_masked(p)) {
276       _masked_steals += 1;
277     }
278   }
279 #endif // PS_PM_STATS
280 };
|
1 #ifdef USE_PRAGMA_IDENT_HDR
2 #pragma ident "@(#)psPromotionManager.hpp 1.20 07/09/25 16:47:42 JVM"
3 #endif
4 /*
5 * Copyright 2002-2008 Sun Microsystems, Inc. All Rights Reserved.
6 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
7 *
8 * This code is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License version 2 only, as
10 * published by the Free Software Foundation.
11 *
12 * This code is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 * version 2 for more details (a copy is included in the LICENSE file that
16 * accompanied this code).
17 *
18 * You should have received a copy of the GNU General Public License version
19 * 2 along with this work; if not, write to the Free Software Foundation,
20 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
21 *
22 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
23 * CA 95054 USA or visit www.sun.com if you need additional information or
24 * have any questions.
25 *
26 */
28 //
29 // psPromotionManager is used by a single thread to manage object survival
30 // during a scavenge. The promotion manager contains thread local data only.
31 //
32 // NOTE! Be careful when allocating the stacks on the C heap. If you are
33 // going to use a promotion manager in more than one thread, the stacks
34 // MUST be on the C heap. This can lead to memory leaks, though, as they
35 // are not automatically deallocated.
36 //
37 // FIX ME FIX ME Add a destructor, and don't rely on the user to drain/flush/deallocate!
38 //
39
40 // Move to some global location
41 #define HAS_BEEN_MOVED 0x1501d01d
42 // End move to some global location
43
44 class MutableSpace;
45 class PSOldGen;
46 class ParCompactionManager;
47
48 #define PS_PM_STATS 0
49
50 class PSPromotionManager : public CHeapObj {
51  friend class PSScavenge;
52  friend class PSRefProcTaskExecutor;
53  private:
  // Scavenge-wide static state. One PSPromotionManager per GC worker is
  // held in _manager_array; the two task-queue sets back the depth-first
  // (StarTask) and breadth-first (oop) traversal modes and support work
  // stealing between workers (see increment_steals below).
54   static PSPromotionManager** _manager_array;
55   static OopStarTaskQueueSet* _stack_array_depth;
56   static OopTaskQueueSet* _stack_array_breadth;
57   static PSOldGen* _old_gen;
58   static MutableSpace* _young_space;
59
  // Optional per-manager instrumentation counters; compiled out in this
  // configuration because PS_PM_STATS is #defined to 0 earlier in the file.
60 #if PS_PM_STATS
61   uint _total_pushes;
62   uint _masked_pushes;
63
64   uint _overflow_pushes;
65   uint _max_overflow_length;
66
67   uint _arrays_chunked;
68   uint _array_chunks_processed;
69
70   uint _total_steals;
71   uint _masked_steals;
72
73   void print_stats(uint i);
74   static void print_stats();
75 #endif // PS_PM_STATS
76
  // Promotion LABs for the young and old generations, plus flags that
  // record that allocation in the corresponding generation has failed.
77   PSYoungPromotionLAB _young_lab;
78   PSOldPromotionLAB _old_lab;
79   bool _young_gen_is_full;
80   bool _old_gen_is_full;
81   PrefetchQueue _prefetch_queue;
82
  // The claimed stacks are fixed-capacity task queues; when a push on one
  // fails, the entry spills into the matching unbounded overflow
  // GrowableArray (see push_depth below). The depth-first queues now hold
  // StarTask entries, which — per the is_narrow() assert below — can also
  // represent narrow (compressed) oop locations.
83   OopStarTaskQueue _claimed_stack_depth;
84   GrowableArray<StarTask>* _overflow_stack_depth;
85   OopTaskQueue _claimed_stack_breadth;
86   GrowableArray<oop>* _overflow_stack_breadth;
87
88   bool _depth_first;
89   bool _totally_drain;
90   uint _target_stack_size;
91
  // Parameters controlling chunked scanning of large object arrays
  // (used by process_array_chunk and the masking helpers below).
92   uint _array_chunk_size;
93   uint _min_array_size_for_chunking;
94
95   // Accessors
96   static PSOldGen* old_gen() { return _old_gen; }
97   static MutableSpace* young_space() { return _young_space; }
98
99   inline static PSPromotionManager* manager_array(int index);
  // Templated on T so both wide (oop*) and narrow reference locations can
  // be claimed/forwarded through one implementation.
100   template <class T> inline void claim_or_forward_internal_depth(T* p);
101   template <class T> inline void claim_or_forward_internal_breadth(T* p);
102
103   GrowableArray<StarTask>* overflow_stack_depth() { return _overflow_stack_depth; }
104   GrowableArray<oop>* overflow_stack_breadth() { return _overflow_stack_breadth; }
105
106   // On the task queues we push reference locations as well as
107   // partially-scanned arrays (in the latter case, we push an oop to
108   // the from-space image of the array and the length on the
109   // from-space image indicates how many entries on the array we still
110   // need to scan; this is basically how ParNew does partial array
111   // scanning too). To be able to distinguish between reference
112   // locations and partially-scanned array oops we simply mask the
113   // latter oops with 0x01. The next three methods do the masking,
114   // unmasking, and checking whether the oop is masked or not. Notice
115   // that the signature of the mask and unmask methods looks a bit
116   // strange, as they accept and return different types (oop and
117   // oop*). This is because of the difference in types between what
118   // the task queue holds (oop*) and oops to partially-scanned arrays
119   // (oop). We do all the necessary casting in the mask / unmask
120   // methods to avoid sprinkling the rest of the code with more casts.
121
122   // These are added to the taskqueue so PS_CHUNKED_ARRAY_OOP_MASK (or any
123   // future masks) can't conflict with COMPRESSED_OOP_MASK
124 #define PS_CHUNKED_ARRAY_OOP_MASK 0x2
125
  // True iff p carries the PS_CHUNKED_ARRAY_OOP_MASK tag bit (0x2 here,
  // to stay clear of COMPRESSED_OOP_MASK per the note above).
126   bool is_oop_masked(StarTask p) {
127     // If something is marked chunked it's always treated like wide oop*
128     return (((intptr_t)(oop*)p) & PS_CHUNKED_ARRAY_OOP_MASK) ==
129                                   PS_CHUNKED_ARRAY_OOP_MASK;
130   }
131
  // Tag a partially-scanned array oop so it can travel on the task queue.
132   oop* mask_chunked_array_oop(oop obj) {
133     assert(!is_oop_masked((oop*) obj), "invariant");
134     oop* ret = (oop*) ((uintptr_t)obj | PS_CHUNKED_ARRAY_OOP_MASK);
135     assert(is_oop_masked(ret), "invariant");
136     return ret;
137   }
138
  // Inverse of mask_chunked_array_oop: strip the tag bit and recover the
  // array oop. Chunked array entries are always wide, never narrow.
139   oop unmask_chunked_array_oop(StarTask p) {
140     assert(is_oop_masked(p), "invariant");
141     assert(!p.is_narrow(), "chunked array oops cannot be narrow");
142     oop *chunk = (oop*)p; // cast p to oop (uses conversion operator)
143     oop ret = oop((oop*)((uintptr_t)chunk & ~PS_CHUNKED_ARRAY_OOP_MASK));
144     assert(!is_oop_masked((oop*) ret), "invariant");
145     return ret;
146   }
147
  // Scan entries [start, end) of a partially-scanned array; templated on
  // the reference width. Defined out of line.
148   template <class T> void process_array_chunk_work(oop obj,
149                                                    int start, int end);
150   void process_array_chunk(oop old);
151
  // Push a reference location (wide or narrow, hence the template) for
  // depth-first processing, spilling to the unbounded overflow stack when
  // the claimed task queue is full.
152   template <class T> void push_depth(T* p) {
153     assert(depth_first(), "pre-condition");
154
155 #if PS_PM_STATS
156     ++_total_pushes;
157 #endif // PS_PM_STATS
158
159     if (!claimed_stack_depth()->push(p)) {
160       overflow_stack_depth()->push(p);
161 #if PS_PM_STATS
162       ++_overflow_pushes;
163       uint stack_length = (uint) overflow_stack_depth()->length();
164       if (stack_length > _max_overflow_length) {
165         _max_overflow_length = stack_length;
166       }
167 #endif // PS_PM_STATS
168     }
169   }
170
  // Push an object for breadth-first processing.
  // NOTE(review): the listing jumps from line 172 to line 223 here; the
  // remainder of push_breadth and the declarations on the intervening
  // lines are not visible in this excerpt.
171   void push_breadth(oop o) {
172     assert(!depth_first(), "pre-condition");
223
224   bool young_gen_is_full() { return _young_gen_is_full; }
225
226   bool old_gen_is_full() { return _old_gen_is_full; }
227   void set_old_gen_is_full(bool state) { _old_gen_is_full = state; }
228
229   // Promotion methods
  // Both defined out of line; presumably copy_to_survivor_space relocates
  // a live object (depth_first selects the traversal discipline) and
  // oop_promotion_failed handles the object when promotion is impossible
  // — confirm against psPromotionManager.cpp.
230   oop copy_to_survivor_space(oop o, bool depth_first);
231   oop oop_promotion_failed(oop obj, markOop obj_mark);
232
233   void reset();
234
235   void flush_labs();
  // Drain whichever claimed stack matches the current traversal mode;
  // totally_drain is forwarded to the mode-specific drain routine.
236   void drain_stacks(bool totally_drain) {
237     if (depth_first()) {
238       drain_stacks_depth(totally_drain);
239     } else {
240       drain_stacks_breadth(totally_drain);
241     }
242   }
243  public:
  // Partially drain the depth stack, but only once it has grown past the
  // configured target size.
244   void drain_stacks_cond_depth() {
245     if (claimed_stack_depth()->size() > _target_stack_size) {
246       drain_stacks_depth(false);
247     }
248   }
249   void drain_stacks_depth(bool totally_drain);
250   void drain_stacks_breadth(bool totally_drain);
251
  // Emptiness tests consult only the stack pair for the active mode.
252   bool claimed_stack_empty() {
253     if (depth_first()) {
254       return claimed_stack_depth()->size() <= 0;
255     } else {
256       return claimed_stack_breadth()->size() <= 0;
257     }
258   }
259   bool overflow_stack_empty() {
260     if (depth_first()) {
261       return overflow_stack_depth()->length() <= 0;
262     } else {
263       return overflow_stack_breadth()->length() <= 0;
264     }
265   }
266   bool stacks_empty() {
267     return claimed_stack_empty() && overflow_stack_empty();
268   }
269   bool depth_first() {
270     return _depth_first;
271   }
272
273   inline void process_popped_location_depth(StarTask p);
274
275   inline void flush_prefetch_queue();
  // Templated on T to accept wide (oop*) or narrow reference locations.
276   template <class T> inline void claim_or_forward_depth(T* p);
277   template <class T> inline void claim_or_forward_breadth(T* p);
278
279 #if PS_PM_STATS
  // Bump steal counters; p, when given, is the stolen queue entry and may
  // be a masked (partially-scanned array) oop.
280   void increment_steals(oop* p = NULL) {
281     _total_steals += 1;
282     if (p != NULL && is_oop_masked(p)) {
283       _masked_steals += 1;
284     }
285   }
286 #endif // PS_PM_STATS
287 };
|