#ifdef USE_PRAGMA_IDENT_HDR
#pragma ident "@(#)parNewGeneration.hpp 1.48 07/05/17 15:52:44 JVM"
#endif
/*
 * Copyright 2001-2007 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

// Forward declarations: these types are declared/defined elsewhere in the
// ParNew sources; only pointers/references to them appear in this header.
class ChunkArray;
class ParScanWithoutBarrierClosure;
class ParScanWithBarrierClosure;
class ParRootScanWithoutBarrierClosure;
class ParRootScanWithBarrierTwoGensClosure;
class ParEvacuateFollowersClosure;

// It would be better if these types could be kept local to the .cpp file,
// but they must be here to allow ParScanClosure::do_oop_work to be defined
// in genOopClosures.inline.hpp.

// Aliases for the per-thread work-stealing queues of oops awaiting scan,
// and for the set that aggregates one such queue per GC worker.
typedef OopTaskQueue    ObjToScanQueue;
typedef OopTaskQueueSet ObjToScanQueueSet;

// Enable this to get push/pop/steal stats.
// Compile-time switch: when zero, the note_*() statistics methods in
// ParScanThreadState compile to no-ops; set non-zero to collect
// push/pop/steal counts.
const int PAR_STATS_ENABLED = 0;

// Keep-alive closure for parallel reference processing.  Wraps the
// per-thread ParScanWeakRefClosure "_par_cl"; do_oop is defined in the
// .cpp file.
class ParKeepAliveClosure: public DefNewGeneration::KeepAliveClosure {
  ParScanWeakRefClosure* _par_cl;
public:
  ParKeepAliveClosure(ParScanWeakRefClosure* cl);
  void do_oop(oop* p);
};

// The state needed by a thread performing parallel young-gen collection.
class ParScanThreadState {
  friend class ParScanThreadStateSet;

  // This worker's work-stealing queue of objects still to be scanned.
  ObjToScanQueue *_work_queue;

  // Per-thread allocation buffer used to satisfy to-space allocations
  // (see alloc_in_to_space() below).
  ParGCAllocBuffer _to_space_alloc_buffer;

  ParScanWithoutBarrierClosure         _to_space_closure;      // scan_without_gc_barrier
  ParScanWithBarrierClosure            _old_gen_closure;       // scan_with_gc_barrier
  ParRootScanWithoutBarrierClosure     _to_space_root_closure; // scan_root_without_gc_barrier
  // One of these two will be passed to process_strong_roots, which will
  // set its generation.  The first is for two-gen configs where the
  // old gen collects the perm gen; the second is for arbitrary configs.
  // The second isn't used right now (it used to be used for the train, an
  // incremental collector) but the declaration has been left as a reminder.
  ParRootScanWithBarrierTwoGensClosure _older_gen_closure;
  // This closure will always be bound to the old gen; it will be used
  // in evacuate_followers.
  ParRootScanWithBarrierTwoGensClosure _old_gen_root_closure;  // scan_old_root_with_gc_barrier
  ParEvacuateFollowersClosure          _evacuate_followers;
  DefNewGeneration::IsAliveClosure     _is_alive_closure;
  ParScanWeakRefClosure                _scan_weak_ref_closure;
  ParKeepAliveClosure                  _keep_alive_closure;

  Space* _to_space;
  Space* to_space() { return _to_space; }

  Generation* _old_gen;
  Generation* old_gen() { return _old_gen; }

  // Boundary between the young and old generations; exposed via the
  // public young_old_boundary()/set_young_old_boundary() accessors.
  HeapWord *_young_old_boundary;

  int _hash_seed;     // seed handed out via hash_seed(); NOTE(review):
                      // presumably used for work-stealing victim
                      // selection — confirm against the task queue code.
  int _thread_num;    // this worker's index
  ageTable _ageTable; // per-thread object age table (see age_table())

  bool _to_space_full; // NOTE(review): apparently records that to-space
                       // has filled up; set/read in the .cpp — confirm.

  // Work-stealing and termination statistics; only maintained when
  // PAR_STATS_ENABLED is non-zero (see the note_*() methods below).
  int _pushes, _pops, _steals, _steal_attempts, _term_attempts;
  int _overflow_pushes, _overflow_refills, _overflow_refill_objs;

  // Timing numbers (seconds, from os::elapsedTime()).
  double _start;
  double _start_strong_roots;
  double _strong_roots_time;
  double _start_term;
  double _term_time;

  // Helper for trim_queues. Scans subset of an array and makes
  // remainder available for work stealing.
  void scan_partial_array_and_push_remainder(oop obj);

  // In support of CMS' parallel rescan of survivor space.
  ChunkArray* _survivor_chunk_array;
  ChunkArray* survivor_chunk_array() { return _survivor_chunk_array; }

  // Record a survivor-space PLAB (start address and size in words) in
  // support of the parallel rescan mentioned above.
  void record_survivor_plab(HeapWord* plab_start, size_t plab_word_size);

  // Private: instances are created only by ParScanThreadStateSet (friend).
  ParScanThreadState(Space* to_space_, ParNewGeneration* gen_,
                     Generation* old_gen_, int thread_num_,
                     ObjToScanQueueSet* work_queue_set_, size_t desired_plab_sz_,
                     ParallelTaskTerminator& term_);

public:
  ageTable* age_table() { return &_ageTable; }

  ObjToScanQueue* work_queue() { return _work_queue; }

  ParGCAllocBuffer* to_space_alloc_buffer() {
    return &_to_space_alloc_buffer;
  }

  // Accessors for the pre-built per-thread closures.
  ParEvacuateFollowersClosure&      evacuate_followers_closure() { return _evacuate_followers; }
  DefNewGeneration::IsAliveClosure& is_alive_closure()      { return _is_alive_closure; }
  ParScanWeakRefClosure&            scan_weak_ref_closure() { return _scan_weak_ref_closure; }
  ParKeepAliveClosure&              keep_alive_closure()    { return _keep_alive_closure; }
  ParScanClosure&                   older_gen_closure()     { return _older_gen_closure; }
  ParRootScanWithoutBarrierClosure& to_space_root_closure() { return _to_space_root_closure; };

  // Decrease queue size below "max_size".
  void trim_queues(int max_size);

  // Is new_obj a candidate for scan_partial_array_and_push_remainder method.
  inline bool should_be_partially_scanned(oop new_obj, oop old_obj) const;

  int* hash_seed()  { return &_hash_seed; }
  int  thread_num() { return _thread_num; }

  // Allocate a to-space block of size "sz", or else return NULL.
  // (Slow path; may refill the allocation buffer — see the .cpp.)
  HeapWord* alloc_in_to_space_slow(size_t word_sz);

  // Fast path: allocate from the per-thread buffer, falling back to
  // alloc_in_to_space_slow() when the buffer cannot satisfy the request.
  HeapWord* alloc_in_to_space(size_t word_sz) {
    HeapWord* obj = to_space_alloc_buffer()->allocate(word_sz);
    if (obj != NULL) return obj;
    else return alloc_in_to_space_slow(word_sz);
  }

  HeapWord* young_old_boundary() { return _young_old_boundary; }

  void set_young_old_boundary(HeapWord *boundary) {
    _young_old_boundary = boundary;
  }

  // Undo the most recent allocation ("obj", of "word_sz").
  void undo_alloc_in_to_space(HeapWord* obj, size_t word_sz);

  // Statistics accessors; values are meaningful only when
  // PAR_STATS_ENABLED is non-zero.
  int pushes()               { return _pushes; }
  int pops()                 { return _pops; }
  int steals()               { return _steals; }
  int steal_attempts()       { return _steal_attempts; }
  int term_attempts()        { return _term_attempts; }
  int overflow_pushes()      { return _overflow_pushes; }
  int overflow_refills()     { return _overflow_refills; }
  int overflow_refill_objs() { return _overflow_refill_objs; }

  // Statistics recorders; compile to no-ops when PAR_STATS_ENABLED is 0.
  void note_push()          { if (PAR_STATS_ENABLED) _pushes++; }
  void note_pop()           { if (PAR_STATS_ENABLED) _pops++; }
  void note_steal()         { if (PAR_STATS_ENABLED) _steals++; }
  void note_steal_attempt() { if (PAR_STATS_ENABLED) _steal_attempts++; }
  void note_term_attempt()  { if (PAR_STATS_ENABLED) _term_attempts++; }
  void note_overflow_push() { if (PAR_STATS_ENABLED) _overflow_pushes++; }
  void note_overflow_refill(int objs) {
    if (PAR_STATS_ENABLED) {
      _overflow_refills++;
      _overflow_refill_objs += objs;
    }
  }

  // Timers for the strong-roots phase; total accumulates across
  // start/end pairs into _strong_roots_time.
  void start_strong_roots() {
    _start_strong_roots = os::elapsedTime();
  }
  void end_strong_roots() {
    _strong_roots_time += (os::elapsedTime() - _start_strong_roots);
  }
  double strong_roots_time() { return _strong_roots_time; }

  // Timers for the termination phase; starting the timer also counts
  // as a termination attempt for the statistics.
  void start_term_time() {
    note_term_attempt();
    _start_term = os::elapsedTime();
  }
  void end_term_time() {
    _term_time += (os::elapsedTime() - _start_term);
  }
  double term_time() { return _term_time; }

  // Seconds since _start (which is set by the .cpp implementation).
  double elapsed() {
    return os::elapsedTime() - _start;
  }
};

// The gang task executed by the parallel GC worker threads for a
// young-gen collection; work(i), defined in the .cpp, is run by worker i.
class ParNewGenTask: public AbstractGangTask {
  ParNewGeneration* _gen;
  Generation* _next_gen;
  HeapWord* _young_old_boundary;
  class ParScanThreadStateSet* _state_set;

public:
  ParNewGenTask(ParNewGeneration* gen,
                Generation* next_gen,
                HeapWord* young_old_boundary,
                ParScanThreadStateSet* state_set);

  HeapWord* young_old_boundary() { return _young_old_boundary; }

  void work(int i);
};

// Keep-alive closure built from a ScanWeakRefClosure; the serial
// counterpart of ParKeepAliveClosure (do_oop is in the .cpp file).
class KeepAliveClosure: public DefNewGeneration::KeepAliveClosure {
public:
  KeepAliveClosure(ScanWeakRefClosure* cl);
  void do_oop(oop* p);
};

// do_void() (defined in the .cpp) evacuates followers for generation
// "_level" of "_gch", applying "_scan_cur_or_nonheap"/"_scan_older".
class EvacuateFollowersClosureGeneral: public VoidClosure {
  GenCollectedHeap* _gch;
  int _level;
  OopsInGenClosure* _scan_cur_or_nonheap;
  OopsInGenClosure* _scan_older;
public:
  EvacuateFollowersClosureGeneral(GenCollectedHeap* gch, int level,
                                  OopsInGenClosure* cur,
                                  OopsInGenClosure* older);
  void do_void();
};

// Closure for scanning ParNewGeneration.
// Same as ScanClosure, except does parallel GC barrier.
class ScanClosureWithParBarrier: public ScanClosure {
public:
  ScanClosureWithParBarrier(ParNewGeneration* g, bool gc_barrier);
  void do_oop(oop* p);
};

// Implements AbstractRefProcTaskExecutor for ParNew.
class ParNewRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
public:

  ParNewRefProcTaskExecutor(ParNewGeneration& generation,
                            ParScanThreadStateSet& state_set)
    : _generation(generation), _state_set(state_set)
  { }

  // Executes a task using worker threads.
  virtual void execute(ProcessTask& task);
  virtual void execute(EnqueueTask& task);
  // Switch to single threaded mode.
  virtual void set_single_threaded_mode();
private:
  ParNewGeneration& _generation;
  ParScanThreadStateSet& _state_set;
};


// A Generation that does parallel young-gen collection.

class ParNewGeneration: public DefNewGeneration {
  friend class ParNewGenTask;
  friend class ParNewRefProcTask;
  friend class ParNewRefProcTaskExecutor;
  friend class ParScanThreadStateSet;

  // XXX use a global constant instead of 64!
  struct ObjToScanQueuePadded {
    ObjToScanQueue work_queue;
    char pad[64 - sizeof(ObjToScanQueue)]; // prevent false sharing
  };

  // The per-thread work queues, available here for stealing.
  ObjToScanQueueSet* _task_queues;

  // Desired size of survivor space plab's
  PLABStats _plab_stats;

  // A list of from-space images of to-be-scanned objects, threaded through
  // klass-pointers (klass information already copied to the forwarded
  // image.)  Manipulated with CAS.
  oop _overflow_list;

  // If true, older generation does not support promotion undo, so avoid.
  static bool _avoid_promotion_undo;

  // This closure is used by the reference processor to filter out
  // references to live referent.
  DefNewGeneration::IsAliveClosure _is_alive_closure;

  // Uncommon case of real_forwardee(); see the .cpp for details.
  static oop real_forwardee_slow(oop obj);
  static void waste_some_time();

  // Preserve the mark of "obj", if necessary, in preparation for its mark
  // word being overwritten with a self-forwarding-pointer.
  void preserve_mark_if_necessary(oop obj, markOop m);

protected:

  bool _survivor_overflow;

  bool avoid_promotion_undo() { return _avoid_promotion_undo; }
  void set_avoid_promotion_undo(bool v) { _avoid_promotion_undo = v; }

  bool survivor_overflow() { return _survivor_overflow; }
  void set_survivor_overflow(bool v) { _survivor_overflow = v; }

  // Adjust the tenuring threshold.  See the implementation for
  // the details of the policy.
  virtual void adjust_desired_tenuring_threshold();

public:
  ParNewGeneration(ReservedSpace rs, size_t initial_byte_size, int level);

  // Delete each per-worker queue, then the queue set itself.
  ~ParNewGeneration() {
    for (uint i = 0; i < ParallelGCThreads; i++)
        delete _task_queues->queue(i);

    delete _task_queues;
  }

  virtual void ref_processor_init();
  virtual Generation::Name kind()        { return Generation::ParNew; }
  virtual const char* name() const;
  virtual const char* short_name() const { return "ParNew"; }

  // override: reference discovery is multi-threaded iff more than one
  // parallel GC thread is configured.
  virtual bool refs_discovery_is_mt() const {
    assert(UseParNewGC, "ParNewGeneration only when UseParNewGC");
    return ParallelGCThreads > 1;
  }

  // Make the collection virtual.
  virtual void collect(bool   full,
                       bool   clear_all_soft_refs,
                       size_t size,
                       bool   is_tlab);

  // This needs to be visible to the closure function.
  // "obj" is the object to be copied, "m" is a recent value of its mark
  // that must not contain a forwarding pointer (though one might be
  // inserted in "obj"s mark word by a parallel thread).
  // Dispatches to the promotion-undo-avoiding variant when the older
  // generation does not support promotion undo.
  inline oop copy_to_survivor_space(ParScanThreadState* par_scan_state,
                                    oop obj, size_t obj_sz, markOop m) {
    if (_avoid_promotion_undo) {
      return copy_to_survivor_space_avoiding_promotion_undo(par_scan_state,
                                                            obj, obj_sz, m);
    }

    return copy_to_survivor_space_with_undo(par_scan_state, obj, obj_sz, m);
  }

  oop copy_to_survivor_space_avoiding_promotion_undo(ParScanThreadState* par_scan_state,
                                                     oop obj, size_t obj_sz, markOop m);

  oop copy_to_survivor_space_with_undo(ParScanThreadState* par_scan_state,
                                       oop obj, size_t obj_sz, markOop m);

  // Push the given (from-space) object on the global overflow list.
  void push_on_overflow_list(oop from_space_obj);

  // If the global overflow list is non-empty, move some tasks from it
  // onto "work_q" (which must be empty).  No more than 1/4 of the
  // max_elems of "work_q" are moved.
  bool take_from_overflow_list(ParScanThreadState* par_scan_state);

  // The task queues to be used by parallel GC threads.
  ObjToScanQueueSet* task_queues() {
    return _task_queues;
  }

  PLABStats* plab_stats() {
    return &_plab_stats;
  }

  size_t desired_plab_sz() {
    return _plab_stats.desired_plab_sz();
  }

  // NOTE(review): presumably returns the to-space forwardee of "obj",
  // with real_forwardee_slow() handling the uncommon case — confirm in
  // the .cpp.
  static oop real_forwardee(oop obj);

  DEBUG_ONLY(static bool is_legal_forward_ptr(oop p);)
};