/*
 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_PARNEW_PARNEWGENERATION_HPP
#define SHARE_VM_GC_IMPLEMENTATION_PARNEW_PARNEWGENERATION_HPP

#include "gc_implementation/shared/gcTrace.hpp"
#include "gc_implementation/shared/parGCAllocBuffer.hpp"
#include "gc_implementation/shared/copyFailedInfo.hpp"
#include "memory/defNewGeneration.hpp"
#include "utilities/taskqueue.hpp"

class ChunkArray;
class ParScanWithoutBarrierClosure;
class ParScanWithBarrierClosure;
class ParRootScanWithoutBarrierClosure;
class ParRootScanWithBarrierTwoGensClosure;
class ParEvacuateFollowersClosure;

// It would be better if these types could be kept local to the .cpp file,
// but they must be here to allow ParScanClosure::do_oop_work to be defined
// in genOopClosures.inline.hpp.

typedef Padded<OopTaskQueue> ObjToScanQueue;
typedef GenericTaskQueueSet<ObjToScanQueue, mtGC> ObjToScanQueueSet;

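// Each GC worker drains its own ObjToScanQueue and, when it runs dry,
// tries to steal from sibling queues before offering termination.  A
// minimal sketch of that pattern (the real loop lives in
// ParEvacuateFollowersClosure::do_void() in parNewGeneration.cpp;
// "process(obj)" stands in for the actual scanning closures):
//
//   oop obj;
//   while (true) {
//     while (queue->pop_local(obj)) process(obj);          // drain local work
//     if (queue_set->steal(thread_num, hash_seed, obj)) {  // try to steal
//       process(obj);
//     } else if (terminator->offer_termination()) {        // all queues empty
//       break;
//     }
//   }
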
class ParKeepAliveClosure: public DefNewGeneration::KeepAliveClosure {
 private:
  ParScanWeakRefClosure* _par_cl;
 protected:
  template <class T> void do_oop_work(T* p);
 public:
  ParKeepAliveClosure(ParScanWeakRefClosure* cl);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
};

// The state needed by a thread performing parallel young-gen collection.
class ParScanThreadState {
  friend class ParScanThreadStateSet;
 private:
  ObjToScanQueue* _work_queue;
  Stack<oop, mtGC>* const _overflow_stack;

  ParGCAllocBuffer _to_space_alloc_buffer;

  ParScanWithoutBarrierClosure         _to_space_closure;      // scan_without_gc_barrier
  ParScanWithBarrierClosure            _old_gen_closure;       // scan_with_gc_barrier
  ParRootScanWithoutBarrierClosure     _to_space_root_closure; // scan_root_without_gc_barrier
  // One of these two will be passed to process_strong_roots, which will
  // set its generation. The first is for two-gen configs where the
  // old gen collects the perm gen; the second is for arbitrary configs.
  // The second isn't used right now (it used to be used for the train, an
  // incremental collector) but the declaration has been left as a reminder.
  ParRootScanWithBarrierTwoGensClosure _older_gen_closure;
  // This closure will always be bound to the old gen; it will be used
  // in evacuate_followers.
  ParRootScanWithBarrierTwoGensClosure _old_gen_root_closure;  // scan_old_root_with_gc_barrier
  ParEvacuateFollowersClosure          _evacuate_followers;
  DefNewGeneration::IsAliveClosure     _is_alive_closure;
  ParScanWeakRefClosure                _scan_weak_ref_closure;
  ParKeepAliveClosure                  _keep_alive_closure;

  Space* _to_space;
  Space* to_space() { return _to_space; }

  ParNewGeneration* _young_gen;
  ParNewGeneration* young_gen() const { return _young_gen; }

  Generation* _old_gen;
  Generation* old_gen() { return _old_gen; }

  HeapWord* _young_old_boundary;

  int _hash_seed;
  int _thread_num;
  ageTable _ageTable;

  bool _to_space_full;

#if TASKQUEUE_STATS
  size_t _term_attempts;
  size_t _overflow_refills;
  size_t _overflow_refill_objs;
#endif // TASKQUEUE_STATS

  // Stats for promotion failure
  PromotionFailedInfo _promotion_failed_info;

  // Timing numbers.
  double _start;
  double _start_strong_roots;
  double _strong_roots_time;
  double _start_term;
  double _term_time;

  // Helper for trim_queues. Scans a subset of an array and makes the
  // remainder available for work stealing.
  void scan_partial_array_and_push_remainder(oop obj);

  // In support of CMS' parallel rescan of survivor space.
  ChunkArray* _survivor_chunk_array;
  ChunkArray* survivor_chunk_array() { return _survivor_chunk_array; }

  void record_survivor_plab(HeapWord* plab_start, size_t plab_word_size);

  ParScanThreadState(Space* to_space_, ParNewGeneration* gen_,
                     Generation* old_gen_, int thread_num_,
                     ObjToScanQueueSet* work_queue_set_,
                     Stack<oop, mtGC>* overflow_stacks_,
                     size_t desired_plab_sz_,
                     ParallelTaskTerminator& term_);

 public:
  ageTable* age_table() { return &_ageTable; }

  ObjToScanQueue* work_queue() { return _work_queue; }

  ParGCAllocBuffer* to_space_alloc_buffer() {
    return &_to_space_alloc_buffer;
  }

  ParEvacuateFollowersClosure&      evacuate_followers_closure() { return _evacuate_followers; }
  DefNewGeneration::IsAliveClosure& is_alive_closure() { return _is_alive_closure; }
  ParScanWeakRefClosure&            scan_weak_ref_closure() { return _scan_weak_ref_closure; }
  ParKeepAliveClosure&              keep_alive_closure() { return _keep_alive_closure; }
  ParScanClosure&                   older_gen_closure() { return _older_gen_closure; }
  ParRootScanWithoutBarrierClosure& to_space_root_closure() { return _to_space_root_closure; }

  // Decrease queue size below "max_size".
  void trim_queues(int max_size);

  // Private overflow stack usage
  Stack<oop, mtGC>* overflow_stack() { return _overflow_stack; }
  bool take_from_overflow_stack();
  void push_on_overflow_stack(oop p);

  // Is new_obj a candidate for the scan_partial_array_and_push_remainder
  // method?
  inline bool should_be_partially_scanned(oop new_obj, oop old_obj) const;

  int* hash_seed() { return &_hash_seed; }
  int  thread_num() { return _thread_num; }

  // Allocate a to-space block of size "sz", or else return NULL.
  HeapWord* alloc_in_to_space_slow(size_t word_sz);

  HeapWord* alloc_in_to_space(size_t word_sz) {
    HeapWord* obj = to_space_alloc_buffer()->allocate(word_sz);
    if (obj != NULL) {
      return obj;
    }
    return alloc_in_to_space_slow(word_sz);
  }

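  // A rough sketch of the slow path, which is defined in
  // parNewGeneration.cpp (simplified here; the real code also weighs
  // how much space a retired PLAB would waste before replacing it):
  //
  //   HeapWord* alloc_in_to_space_slow(size_t word_sz) {
  //     retire the current PLAB and refill it from to-space;
  //     if the refill succeeds, allocate word_sz words from the new PLAB;
  //     otherwise fall back to a direct (shared) allocation in to-space,
  //     returning NULL if to-space is exhausted.
  //   }
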
  HeapWord* young_old_boundary() { return _young_old_boundary; }

  void set_young_old_boundary(HeapWord* boundary) {
    _young_old_boundary = boundary;
  }

  // Undo the most recent allocation ("obj", of "word_sz").
  void undo_alloc_in_to_space(HeapWord* obj, size_t word_sz);

  // Promotion failure stats
  void register_promotion_failure(size_t sz) {
    _promotion_failed_info.register_copy_failure(sz);
  }
  PromotionFailedInfo& promotion_failed_info() {
    return _promotion_failed_info;
  }
  bool promotion_failed() {
    return _promotion_failed_info.has_failed();
  }
  void print_promotion_failure_size();

#if TASKQUEUE_STATS
  TaskQueueStats& taskqueue_stats() const { return _work_queue->stats; }

  size_t term_attempts() const { return _term_attempts; }
  size_t overflow_refills() const { return _overflow_refills; }
  size_t overflow_refill_objs() const { return _overflow_refill_objs; }

  void note_term_attempt() { ++_term_attempts; }
  void note_overflow_refill(size_t objs) {
    ++_overflow_refills; _overflow_refill_objs += objs;
  }

  void reset_stats();
#endif // TASKQUEUE_STATS

  void start_strong_roots() {
    _start_strong_roots = os::elapsedTime();
  }
  void end_strong_roots() {
    _strong_roots_time += (os::elapsedTime() - _start_strong_roots);
  }
  double strong_roots_time() const { return _strong_roots_time; }
  void start_term_time() {
    TASKQUEUE_STATS_ONLY(note_term_attempt());
    _start_term = os::elapsedTime();
  }
  void end_term_time() {
    _term_time += (os::elapsedTime() - _start_term);
  }
  double term_time() const { return _term_time; }

  double elapsed_time() const {
    return os::elapsedTime() - _start;
  }
};

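// A hedged outline of how ParNewGenTask::work() (in parNewGeneration.cpp)
// uses the per-thread state above -- scan strong roots first, then
// evacuate followers until global termination:
//
//   void ParNewGenTask::work(uint worker_id) {
//     ParScanThreadState& s = _state_set->thread_state(worker_id);
//     s.set_young_old_boundary(_young_old_boundary);
//     s.start_strong_roots();
//     ... process strong roots with the root-scanning closures ...
//     s.end_strong_roots();
//     s.evacuate_followers_closure().do_void();  // drain/steal until done
//   }
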
class ParNewGenTask: public AbstractGangTask {
 private:
  ParNewGeneration*            _gen;
  Generation*                  _next_gen;
  HeapWord*                    _young_old_boundary;
  class ParScanThreadStateSet* _state_set;

 public:
  ParNewGenTask(ParNewGeneration*      gen,
                Generation*            next_gen,
                HeapWord*              young_old_boundary,
                ParScanThreadStateSet* state_set);

  HeapWord* young_old_boundary() { return _young_old_boundary; }

  void work(uint worker_id);

  // Reset the terminator in ParScanThreadStateSet for
  // "active_workers" threads.
  virtual void set_for_termination(int active_workers);
};

class KeepAliveClosure: public DefNewGeneration::KeepAliveClosure {
 protected:
  template <class T> void do_oop_work(T* p);
 public:
  KeepAliveClosure(ScanWeakRefClosure* cl);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
};

class EvacuateFollowersClosureGeneral: public VoidClosure {
 private:
  GenCollectedHeap* _gch;
  int               _level;
  OopsInGenClosure* _scan_cur_or_nonheap;
  OopsInGenClosure* _scan_older;
 public:
  EvacuateFollowersClosureGeneral(GenCollectedHeap* gch, int level,
                                  OopsInGenClosure* cur,
                                  OopsInGenClosure* older);
  virtual void do_void();
};

// Closure for scanning ParNewGeneration.
// Same as ScanClosure, except does parallel GC barrier.
class ScanClosureWithParBarrier: public ScanClosure {
 protected:
  template <class T> void do_oop_work(T* p);
 public:
  ScanClosureWithParBarrier(ParNewGeneration* g, bool gc_barrier);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
};

// Implements AbstractRefProcTaskExecutor for ParNew.
class ParNewRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
 private:
  ParNewGeneration&      _generation;
  ParScanThreadStateSet& _state_set;
 public:
  ParNewRefProcTaskExecutor(ParNewGeneration&      generation,
                            ParScanThreadStateSet& state_set)
    : _generation(generation), _state_set(state_set)
  { }

  // Executes a task using worker threads.
  virtual void execute(ProcessTask& task);
  virtual void execute(EnqueueTask& task);
  // Switch to single threaded mode.
  virtual void set_single_threaded_mode();
};

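// ParNewGeneration (below) keeps a global overflow list of from-space
// images, threaded through klass pointers and manipulated with CAS.
// A simplified sketch of the lock-free push, assuming Atomic::cmpxchg_ptr
// semantics (the real push_on_overflow_list in parNewGeneration.cpp also
// coordinates with concurrent take operations via a BUSY sentinel):
//
//   oop cur;
//   do {
//     cur = _overflow_list;                        // read the current head
//     from_space_obj->set_klass_to_list_ptr(cur);  // link through klass word
//   } while (Atomic::cmpxchg_ptr(from_space_obj, &_overflow_list, cur) != cur);
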
// A Generation that does parallel young-gen collection.

class ParNewGeneration: public DefNewGeneration {
  friend class ParNewGenTask;
  friend class ParNewRefProcTask;
  friend class ParNewRefProcTaskExecutor;
  friend class ParScanThreadStateSet;
  friend class ParEvacuateFollowersClosure;

 private:
  // The per-worker-thread work queues
  ObjToScanQueueSet* _task_queues;

  // Per-worker-thread local overflow stacks
  Stack<oop, mtGC>* _overflow_stacks;

  // Desired size of survivor space PLABs
  PLABStats _plab_stats;

  // A list of from-space images of to-be-scanned objects, threaded through
  // klass-pointers (klass information already copied to the forwarded
  // image.) Manipulated with CAS.
  oop _overflow_list;
  NOT_PRODUCT(ssize_t _num_par_pushes;)

  // If true, the older generation does not support promotion undo, so avoid it.
  static bool _avoid_promotion_undo;

  // This closure is used by the reference processor to filter out
  // references to a live referent.
  DefNewGeneration::IsAliveClosure _is_alive_closure;

  static oop real_forwardee_slow(oop obj);
  static void waste_some_time();

  // Preserve the mark of "obj", if necessary, in preparation for its mark
  // word being overwritten with a self-forwarding-pointer.
  void preserve_mark_if_necessary(oop obj, markOop m);

  void handle_promotion_failed(GenCollectedHeap* gch,
                               ParScanThreadStateSet& thread_state_set,
                               ParNewTracer& gc_tracer);

 protected:

  bool _survivor_overflow;

  bool avoid_promotion_undo() { return _avoid_promotion_undo; }
  void set_avoid_promotion_undo(bool v) { _avoid_promotion_undo = v; }

  bool survivor_overflow() { return _survivor_overflow; }
  void set_survivor_overflow(bool v) { _survivor_overflow = v; }

  // Adjust the tenuring threshold. See the implementation for
  // the details of the policy.
  virtual void adjust_desired_tenuring_threshold();

 public:
  ParNewGeneration(ReservedSpace rs, size_t initial_byte_size, int level);

  ~ParNewGeneration() {
    for (uint i = 0; i < ParallelGCThreads; i++) {
      delete _task_queues->queue(i);
    }
    delete _task_queues;
  }

  static bool in_use();

  virtual void ref_processor_init();
  virtual Generation::Name kind() { return Generation::ParNew; }
  virtual const char* name() const;
  virtual const char* short_name() const { return "ParNew"; }

  // override
  virtual bool refs_discovery_is_mt() const {
    assert(UseParNewGC, "ParNewGeneration only when UseParNewGC");
    return ParallelGCThreads > 1;
  }

  // Make the collection virtual.
  virtual void collect(bool   full,
                       bool   clear_all_soft_refs,
                       size_t size,
                       bool   is_tlab);

  // This needs to be visible to the closure function.
  // "obj" is the object to be copied, "m" is a recent value of its mark
  // that must not contain a forwarding pointer (though one might be
  // inserted in "obj"'s mark word by a parallel thread).
  inline oop copy_to_survivor_space(ParScanThreadState* par_scan_state,
                                    oop obj, size_t obj_sz, markOop m) {
    if (_avoid_promotion_undo) {
      return copy_to_survivor_space_avoiding_promotion_undo(par_scan_state,
                                                            obj, obj_sz, m);
    }
    return copy_to_survivor_space_with_undo(par_scan_state, obj, obj_sz, m);
  }

  oop copy_to_survivor_space_avoiding_promotion_undo(ParScanThreadState* par_scan_state,
                                                     oop obj, size_t obj_sz, markOop m);

  oop copy_to_survivor_space_with_undo(ParScanThreadState* par_scan_state,
                                       oop obj, size_t obj_sz, markOop m);

  // In support of testing the overflow code.
  NOT_PRODUCT(int _overflow_counter;)
  NOT_PRODUCT(bool should_simulate_overflow();)

  // Accessor for the overflow list.
  oop overflow_list() { return _overflow_list; }

  // Push the given (from-space) object on the global overflow list.
  void push_on_overflow_list(oop from_space_obj, ParScanThreadState* par_scan_state);

  // If the global overflow list is non-empty, move some tasks from it
  // onto "work_q" (which need not be empty). No more than 1/4 of the
  // available space on "work_q" is used.
  bool take_from_overflow_list(ParScanThreadState* par_scan_state);
  bool take_from_overflow_list_work(ParScanThreadState* par_scan_state);

  // The task queues to be used by parallel GC threads.
  ObjToScanQueueSet* task_queues() {
    return _task_queues;
  }

  PLABStats* plab_stats() {
    return &_plab_stats;
  }

  size_t desired_plab_sz() {
    return _plab_stats.desired_plab_sz();
  }

  static oop real_forwardee(oop obj);

  DEBUG_ONLY(static bool is_legal_forward_ptr(oop p);)
};

#endif // SHARE_VM_GC_IMPLEMENTATION_PARNEW_PARNEWGENERATION_HPP