/*
 * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_SHARED_REFERENCEPROCESSOR_HPP
#define SHARE_VM_GC_SHARED_REFERENCEPROCESSOR_HPP

#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/referenceProcessorPhaseTimes.hpp"
#include "gc/shared/referenceProcessorStats.hpp"
#include "memory/referenceType.hpp"
#include "oops/instanceRefKlass.hpp"

class GCTimer;

// The ReferenceProcessor class encapsulates the per-"collector" processing
// of java.lang.Reference objects for GC. The interface is useful for supporting
// a generational abstraction, in particular when there are multiple
// generations that are being independently collected -- possibly
// concurrently and/or incrementally.
// The ReferenceProcessor class abstracts away from a generational setting
// by using a closure that determines whether a given reference or referent is
// subject to this ReferenceProcessor's discovery, thus allowing its use in a
// straightforward manner in a general, non-generational, non-contiguous generation
// (or heap) setting.
//

// forward references
class ReferencePolicy;
class AbstractRefProcTaskExecutor;

// List of discovered references.
class DiscoveredList {
public:
  DiscoveredList() : _len(0), _compressed_head(0), _oop_head(NULL) { }
  inline oop head() const;
  HeapWord* adr_head() {
    return UseCompressedOops ? (HeapWord*)&_compressed_head :
                               (HeapWord*)&_oop_head;
  }
  inline void set_head(oop o);
  inline bool is_empty() const;
  size_t length()               { return _len; }
  void   set_length(size_t len) { _len = len;  }
  void   inc_length(size_t inc) { _len += inc; assert(_len > 0, "Error"); }
  void   dec_length(size_t dec) { _len -= dec; }
private:
  // Set value depending on UseCompressedOops. This could be a template class
  // but then we have to fix all the instantiations and declarations that use this class.
  oop       _oop_head;
  narrowOop _compressed_head;
  size_t    _len;
};

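// Illustrative sketch only: one way a collector might walk a DiscoveredList
// using the DiscoveredListIterator declared below, dropping entries whose
// referents turn out to be reachable. The refs_list, is_alive and keep_alive
// values are assumed to be supplied by the caller.
//
//   DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
//   while (iter.has_next()) {
//     iter.load_ptrs(DEBUG_ONLY(false /* allow_null_referent */));
//     if (iter.is_referent_alive()) {
//       iter.remove();               // referent is reachable; drop the entry
//       iter.make_referent_alive();  // update the referent pointer as needed
//       iter.move_to_next();
//     } else {
//       iter.next();
//     }
//   }
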
// Iterator for the list of discovered references.
class DiscoveredListIterator {
private:
  DiscoveredList&    _refs_list;
  HeapWord*          _prev_next;
  oop                _prev;
  oop                _ref;
  HeapWord*          _discovered_addr;
  oop                _next;
  HeapWord*          _referent_addr;
  oop                _referent;
  OopClosure*        _keep_alive;
  BoolObjectClosure* _is_alive;

  DEBUG_ONLY(
  oop _first_seen; // cyclic linked list check
  )

  NOT_PRODUCT(
  size_t _processed;
  size_t _removed;
  )

public:
  inline DiscoveredListIterator(DiscoveredList&    refs_list,
                                OopClosure*        keep_alive,
                                BoolObjectClosure* is_alive);

  // End Of List.
  inline bool has_next() const { return _ref != NULL; }

  // Get oop to the Reference object.
  inline oop obj() const { return _ref; }

  // Get oop to the referent object.
  inline oop referent() const { return _referent; }

  // Returns true if referent is alive.
  inline bool is_referent_alive() const {
    return _is_alive->do_object_b(_referent);
  }

  // Loads data for the current reference.
  // The "allow_null_referent" argument tells us to allow for the possibility
  // of a NULL referent in the discovered Reference object. This typically
  // happens in the case of concurrent collectors that may have done the
  // discovery concurrently, or interleaved, with mutator execution.
  void load_ptrs(DEBUG_ONLY(bool allow_null_referent));

  // Move to the next discovered reference.
  inline void next() {
    _prev_next = _discovered_addr;
    _prev = _ref;
    move_to_next();
  }

  // Remove the current reference from the list
  void remove();

  // Make the referent alive.
  inline void make_referent_alive() {
    if (UseCompressedOops) {
      _keep_alive->do_oop((narrowOop*)_referent_addr);
    } else {
      _keep_alive->do_oop((oop*)_referent_addr);
    }
  }

  // NULL out referent pointer.
  void clear_referent();

  // Statistics
  NOT_PRODUCT(
  inline size_t processed() const { return _processed; }
  inline size_t removed() const   { return _removed; }
  )

  inline void move_to_next() {
    if (_ref == _next) {
      // End of the list.
      _ref = NULL;
    } else {
      _ref = _next;
    }
    assert(_ref != _first_seen, "cyclic ref_list found");
    NOT_PRODUCT(_processed++);
  }
};

class ReferenceProcessor : public CHeapObj<mtGC> {
  size_t total_count(DiscoveredList lists[]) const;

  // The SoftReference master timestamp clock
  static jlong _soft_ref_timestamp_clock;

  BoolObjectClosure* _is_subject_to_discovery; // determines whether a given oop is subject
                                               // to this ReferenceProcessor's discovery
                                               // (and further processing).

  bool        _discovering_refs;        // true when discovery enabled
  bool        _discovery_is_atomic;     // if discovery is atomic wrt
                                        // other collectors in configuration
  bool        _discovery_is_mt;         // true if reference discovery is MT.

  bool        _enqueuing_is_done;       // true if all weak references enqueued
  bool        _processing_is_mt;        // true during phases when
                                        // reference processing is MT.
  uint        _next_id;                 // round-robin mod _num_q counter in
                                        // support of work distribution

  // For collectors that do not keep GC liveness information
  // in the object header, this field holds a closure that
  // helps the reference processor determine the reachability
  // of an oop. It is currently initialized to NULL for all
  // collectors except for CMS and G1.
  BoolObjectClosure* _is_alive_non_header;

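  // Illustrative sketch only: a concurrent collector that records liveness in
  // a side structure (the "MarkBitMap" type and "_bitmap" field here are
  // hypothetical, purely for illustration) might supply such a closure and
  // install it via set_is_alive_non_header():
  //
  //   class BitMapIsAliveClosure : public BoolObjectClosure {
  //     MarkBitMap* _bitmap;
  //    public:
  //     BitMapIsAliveClosure(MarkBitMap* bitmap) : _bitmap(bitmap) { }
  //     virtual bool do_object_b(oop obj) { return _bitmap->is_marked(obj); }
  //   };
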
  // Soft ref clearing policies
  // . the default policy
  static ReferencePolicy* _default_soft_ref_policy;
  // . the "clear all" policy
  static ReferencePolicy* _always_clear_soft_ref_policy;
  // . the current policy below is either one of the above
  ReferencePolicy*        _current_soft_ref_policy;

  // The discovered ref lists themselves

  // The active MT'ness degree of the queues below
  uint            _num_q;
  // The maximum MT'ness degree of the queues below
  uint            _max_num_q;

  // Master array of discovered oops
  DiscoveredList* _discovered_refs;

  // Arrays of lists of oops, one per thread (pointers into master array above)
  DiscoveredList* _discoveredSoftRefs;
  DiscoveredList* _discoveredWeakRefs;
  DiscoveredList* _discoveredFinalRefs;
  DiscoveredList* _discoveredPhantomRefs;

public:
  static int number_of_subclasses_of_ref() { return (REF_PHANTOM - REF_OTHER); }

  uint num_q()                             { return _num_q; }
  uint max_num_q()                         { return _max_num_q; }
  void set_active_mt_degree(uint v);

  DiscoveredList* discovered_refs()        { return _discovered_refs; }

  ReferencePolicy* setup_policy(bool always_clear) {
    _current_soft_ref_policy = always_clear ?
      _always_clear_soft_ref_policy : _default_soft_ref_policy;
    _current_soft_ref_policy->setup();   // snapshot the policy threshold
    return _current_soft_ref_policy;
  }

  // Process references with a certain reachability level.
  void process_discovered_reflist(DiscoveredList                refs_lists[],
                                  ReferencePolicy*              policy,
                                  bool                          clear_referent,
                                  BoolObjectClosure*            is_alive,
                                  OopClosure*                   keep_alive,
                                  VoidClosure*                  complete_gc,
                                  AbstractRefProcTaskExecutor*  task_executor,
                                  ReferenceProcessorPhaseTimes* phase_times);

  // Work methods used by the method process_discovered_reflist
  // Phase1: keep alive all those referents that are otherwise
  // dead but which must be kept alive by policy (and their closure).
  void process_phase1(DiscoveredList&     refs_list,
                      ReferencePolicy*    policy,
                      BoolObjectClosure*  is_alive,
                      OopClosure*         keep_alive,
                      VoidClosure*        complete_gc);
  // Phase2: remove all those references whose referents are
  // reachable.
  void process_phase2(DiscoveredList&    refs_list,
                      BoolObjectClosure* is_alive,
                      OopClosure*        keep_alive,
                      VoidClosure*       complete_gc);
  // Work methods in support of process_phase2
  void pp2_work(DiscoveredList&    refs_list,
                BoolObjectClosure* is_alive,
                OopClosure*        keep_alive);
  void pp2_work_concurrent_discovery(
                DiscoveredList&    refs_list,
                BoolObjectClosure* is_alive,
                OopClosure*        keep_alive,
                VoidClosure*       complete_gc);
  // Phase3: process the referents by either clearing them
  // or keeping them alive (and their closure)
  void process_phase3(DiscoveredList&    refs_list,
                      bool               clear_referent,
                      BoolObjectClosure* is_alive,
                      OopClosure*        keep_alive,
                      VoidClosure*       complete_gc);

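  // Illustrative sketch only: for a single DiscoveredList, the phases above
  // are applied roughly in the following order by process_discovered_reflist()
  // (serial case; "policy" is non-NULL only when processing SoftReferences):
  //
  //   if (policy != NULL) {
  //     process_phase1(refs_list, policy, is_alive, keep_alive, complete_gc);
  //   }
  //   process_phase2(refs_list, is_alive, keep_alive, complete_gc);
  //   process_phase3(refs_list, clear_referent, is_alive, keep_alive, complete_gc);
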
  // Enqueue references with a certain reachability level
  void enqueue_discovered_reflist(DiscoveredList& refs_list);

  // "Preclean" all the discovered reference lists
  // by removing references with strongly reachable referents.
  // The first argument is a predicate on an oop that indicates
  // its (strong) reachability and the second is a closure that
  // may be used to incrementalize or abort the precleaning process.
  // The caller is responsible for taking care of potential
  // interference with concurrent operations on these lists
  // (or predicates involved) by other threads. Currently
  // only used by the CMS collector.
  void preclean_discovered_references(BoolObjectClosure* is_alive,
                                      OopClosure*        keep_alive,
                                      VoidClosure*       complete_gc,
                                      YieldClosure*      yield,
                                      GCTimer*           gc_timer);

  // Returns the name of the discovered reference list
  // occupying the i / _num_q slot.
  const char* list_name(uint i);

  void enqueue_discovered_reflists(AbstractRefProcTaskExecutor*  task_executor,
                                   ReferenceProcessorPhaseTimes* phase_times);

  // "Preclean" the given discovered reference list
  // by removing references with strongly reachable referents.
  // Currently used in support of CMS only.
  void preclean_discovered_reflist(DiscoveredList&    refs_list,
                                   BoolObjectClosure* is_alive,
                                   OopClosure*        keep_alive,
                                   VoidClosure*       complete_gc,
                                   YieldClosure*      yield);
private:
  // round-robin mod _num_q (not: _not_ mod _max_num_q)
  uint next_id() {
    uint id = _next_id;
    assert(!_discovery_is_mt, "Round robin should only be used in serial discovery");
    if (++_next_id == _num_q) {
      _next_id = 0;
    }
    assert(_next_id < _num_q, "_next_id %u _num_q %u _max_num_q %u", _next_id, _num_q, _max_num_q);
    return id;
  }
  DiscoveredList* get_discovered_list(ReferenceType rt);
  inline void add_to_discovered_list_mt(DiscoveredList& refs_list, oop obj,
                                        HeapWord* discovered_addr);

  void clear_discovered_references(DiscoveredList& refs_list);

  void log_reflist_counts(DiscoveredList ref_lists[], uint active_length, size_t total_count) PRODUCT_RETURN;

  // Balances reference queues.
  void balance_queues(DiscoveredList ref_lists[]);

  // Update (advance) the soft ref master clock field.
  void update_soft_ref_master_clock();

  template <class T>
  bool is_subject_to_discovery(T const obj) const;

public:
  // Default parameters give you a vanilla reference processor.
  ReferenceProcessor(BoolObjectClosure* is_subject_to_discovery,
                     bool mt_processing = false, uint mt_processing_degree = 1,
                     bool mt_discovery  = false, uint mt_discovery_degree  = 1,
                     bool atomic_discovery = true,
                     BoolObjectClosure* is_alive_non_header = NULL);

  // RefDiscoveryPolicy values
  enum DiscoveryPolicy {
    ReferenceBasedDiscovery = 0,
    ReferentBasedDiscovery = 1,
    DiscoveryPolicyMin = ReferenceBasedDiscovery,
    DiscoveryPolicyMax = ReferentBasedDiscovery
  };

  static void init_statics();

public:
  // get and set "is_alive_non_header" field
  BoolObjectClosure* is_alive_non_header() {
    return _is_alive_non_header;
  }
  void set_is_alive_non_header(BoolObjectClosure* is_alive_non_header) {
    _is_alive_non_header = is_alive_non_header;
  }

  BoolObjectClosure* is_subject_to_discovery_closure() const { return _is_subject_to_discovery; }
  void set_is_subject_to_discovery_closure(BoolObjectClosure* cl) { _is_subject_to_discovery = cl; }

  // start and stop weak ref discovery
  void enable_discovery(bool check_no_refs = true);
  void disable_discovery()  { _discovering_refs = false; }
  bool discovery_enabled()  { return _discovering_refs;  }

  // whether discovery is atomic wrt other collectors
  bool discovery_is_atomic() const { return _discovery_is_atomic; }
  void set_atomic_discovery(bool atomic) { _discovery_is_atomic = atomic; }

  // whether discovery is done by multiple threads same-old-timeously
  bool discovery_is_mt() const { return _discovery_is_mt; }
  void set_mt_discovery(bool mt) { _discovery_is_mt = mt; }

  // Whether we are in a phase when _processing_ is MT.
  bool processing_is_mt() const { return _processing_is_mt; }
  void set_mt_processing(bool mt) { _processing_is_mt = mt; }

  // whether all enqueueing of weak references is complete
  bool enqueuing_is_done()  { return _enqueuing_is_done; }
  void set_enqueuing_is_done(bool v) { _enqueuing_is_done = v; }

  // iterate over oops
  void weak_oops_do(OopClosure* f);       // weak roots

  // Balance each of the discovered lists.
  void balance_all_queues();
  void verify_list(DiscoveredList& ref_list);

  // Discover a Reference object, using appropriate discovery criteria
  bool discover_reference(oop obj, ReferenceType rt);

  // Has discovered references that need handling
  bool has_discovered_references();

  // Process references found during GC (called by the garbage collector)
  ReferenceProcessorStats
  process_discovered_references(BoolObjectClosure*            is_alive,
                                OopClosure*                   keep_alive,
                                VoidClosure*                  complete_gc,
                                AbstractRefProcTaskExecutor*  task_executor,
                                ReferenceProcessorPhaseTimes* phase_times);

  // Enqueue references at end of GC (called by the garbage collector)
  void enqueue_discovered_references(AbstractRefProcTaskExecutor*  task_executor,
                                     ReferenceProcessorPhaseTimes* phase_times);

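  // Illustrative sketch only of the typical per-collection interaction with
  // this interface. The names "rp", "clear_all_soft_refs", "is_alive",
  // "keep_alive", "complete_gc" and "pt" stand for collector-supplied values
  // and are not defined in this file:
  //
  //   rp->enable_discovery();
  //   rp->setup_policy(clear_all_soft_refs);
  //   // ... trace the heap, calling rp->discover_reference(obj, rt) for
  //   // Reference objects encountered during marking ...
  //   ReferenceProcessorStats stats =
  //     rp->process_discovered_references(&is_alive, &keep_alive, &complete_gc,
  //                                       NULL /* serial processing */, &pt);
  //   rp->enqueue_discovered_references(NULL /* serial */, &pt);
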
  // If a discovery is in process that is being superseded, abandon it: all
  // the discovered lists will be empty, and all the objects on them will
  // have NULL discovered fields. Must be called only at a safepoint.
  void abandon_partial_discovery();

  size_t total_reference_count(ReferenceType rt) const;

  // debugging
  void verify_no_references_recorded() PRODUCT_RETURN;
  void verify_referent(oop obj)        PRODUCT_RETURN;
};

// A reference processor that uses a single memory span to determine the area that
// is subject to discovery. Useful for collectors which have contiguous generations.
class SpanReferenceProcessor : public ReferenceProcessor {
  class SpanBasedDiscoverer : public BoolObjectClosure {
  public:
    MemRegion _span;

    SpanBasedDiscoverer(MemRegion span) : BoolObjectClosure(), _span(span) { }

    virtual bool do_object_b(oop obj) {
      return _span.contains(obj);
    }
  };

  SpanBasedDiscoverer _span_based_discoverer;
public:
  SpanReferenceProcessor(MemRegion span,
                         bool mt_processing = false, uint mt_processing_degree = 1,
                         bool mt_discovery  = false, uint mt_discovery_degree  = 1,
                         bool atomic_discovery = true,
                         BoolObjectClosure* is_alive_non_header = NULL);

  // get and set span
  MemRegion span()                   { return _span_based_discoverer._span; }
  void      set_span(MemRegion span) { _span_based_discoverer._span = span; }
};

// A utility class to disable reference discovery in
// the scope which contains it, for the given ReferenceProcessor.
class NoRefDiscovery: StackObj {
private:
  ReferenceProcessor* _rp;
  bool _was_discovering_refs;
public:
  NoRefDiscovery(ReferenceProcessor* rp) : _rp(rp) {
    _was_discovering_refs = _rp->discovery_enabled();
    if (_was_discovering_refs) {
      _rp->disable_discovery();
    }
  }

  ~NoRefDiscovery() {
    if (_was_discovering_refs) {
      _rp->enable_discovery(false /*check_no_refs*/);
    }
  }
};

// A utility class to temporarily mutate the subject discovery closure of the
// given ReferenceProcessor in the scope that contains it.
class ReferenceProcessorSubjectToDiscoveryMutator : StackObj {
private:
  ReferenceProcessor* _rp;
  BoolObjectClosure*  _saved_cl;

public:
  ReferenceProcessorSubjectToDiscoveryMutator(ReferenceProcessor* rp, BoolObjectClosure* cl):
    _rp(rp) {
    _saved_cl = _rp->is_subject_to_discovery_closure();
    _rp->set_is_subject_to_discovery_closure(cl);
  }

  ~ReferenceProcessorSubjectToDiscoveryMutator() {
    _rp->set_is_subject_to_discovery_closure(_saved_cl);
  }
};

// A utility class to temporarily mutate the span of the
// given ReferenceProcessor in the scope that contains it.
class ReferenceProcessorSpanMutator: StackObj {
private:
  SpanReferenceProcessor* _rp;
  MemRegion               _saved_span;

public:
  ReferenceProcessorSpanMutator(SpanReferenceProcessor* rp,
                                MemRegion span):
    _rp(rp) {
    _saved_span = _rp->span();
    _rp->set_span(span);
  }

  ~ReferenceProcessorSpanMutator() {
    _rp->set_span(_saved_span);
  }
};

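// Illustrative sketch only: NoRefDiscovery and the *Mutator utility classes
// above and below are intended to be used as stack-allocated, scope-bound
// guards. For example ("rp" is an assumed, caller-owned ReferenceProcessor*):
//
//   {
//     NoRefDiscovery no_discovery(rp);
//     // ... work during which no new references should be discovered ...
//   } // discovery is re-enabled here if it was enabled on entry
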
// A utility class to temporarily change the MT'ness of
// reference discovery for the given ReferenceProcessor
// in the scope that contains it.
class ReferenceProcessorMTDiscoveryMutator: StackObj {
private:
  ReferenceProcessor* _rp;
  bool                _saved_mt;

public:
  ReferenceProcessorMTDiscoveryMutator(ReferenceProcessor* rp,
                                       bool mt):
    _rp(rp) {
    _saved_mt = _rp->discovery_is_mt();
    _rp->set_mt_discovery(mt);
  }

  ~ReferenceProcessorMTDiscoveryMutator() {
    _rp->set_mt_discovery(_saved_mt);
  }
};

// A utility class to temporarily change the disposition
// of the "is_alive_non_header" closure field of the
// given ReferenceProcessor in the scope that contains it.
class ReferenceProcessorIsAliveMutator: StackObj {
private:
  ReferenceProcessor* _rp;
  BoolObjectClosure*  _saved_cl;

public:
  ReferenceProcessorIsAliveMutator(ReferenceProcessor* rp,
                                   BoolObjectClosure*  cl):
    _rp(rp) {
    _saved_cl = _rp->is_alive_non_header();
    _rp->set_is_alive_non_header(cl);
  }

  ~ReferenceProcessorIsAliveMutator() {
    _rp->set_is_alive_non_header(_saved_cl);
  }
};

// A utility class to temporarily change the disposition
// of the "discovery_is_atomic" field of the
// given ReferenceProcessor in the scope that contains it.
class ReferenceProcessorAtomicMutator: StackObj {
private:
  ReferenceProcessor* _rp;
  bool                _saved_atomic_discovery;

public:
  ReferenceProcessorAtomicMutator(ReferenceProcessor* rp,
                                  bool atomic):
    _rp(rp) {
    _saved_atomic_discovery = _rp->discovery_is_atomic();
    _rp->set_atomic_discovery(atomic);
  }

  ~ReferenceProcessorAtomicMutator() {
    _rp->set_atomic_discovery(_saved_atomic_discovery);
  }
};


// A utility class to temporarily change the MT processing
// disposition of the given ReferenceProcessor instance
// in the scope that contains it.
class ReferenceProcessorMTProcMutator: StackObj {
private:
  ReferenceProcessor* _rp;
  bool                _saved_mt;

public:
  ReferenceProcessorMTProcMutator(ReferenceProcessor* rp,
                                  bool mt):
    _rp(rp) {
    _saved_mt = _rp->processing_is_mt();
    _rp->set_mt_processing(mt);
  }

  ~ReferenceProcessorMTProcMutator() {
    _rp->set_mt_processing(_saved_mt);
  }
};


// This class is an interface used to implement task execution for the
// reference processing.
class AbstractRefProcTaskExecutor {
public:

  // Abstract tasks to execute.
  class ProcessTask;
  class EnqueueTask;

  // Executes a task using worker threads.
  virtual void execute(ProcessTask& task) = 0;
  virtual void execute(EnqueueTask& task) = 0;

  // Switch to single threaded mode.
  virtual void set_single_threaded_mode() { };
};

// Abstract reference processing task to execute.
class AbstractRefProcTaskExecutor::ProcessTask {
protected:
  ProcessTask(ReferenceProcessor&           ref_processor,
              DiscoveredList                refs_lists[],
              bool                          marks_oops_alive,
              ReferenceProcessorPhaseTimes* phase_times)
    : _ref_processor(ref_processor),
      _refs_lists(refs_lists),
      _phase_times(phase_times),
      _marks_oops_alive(marks_oops_alive)
  { }

public:
  virtual void work(unsigned int work_id, BoolObjectClosure& is_alive,
                    OopClosure& keep_alive,
                    VoidClosure& complete_gc) = 0;

  // Returns true if a task marks some oops as alive.
  bool marks_oops_alive() const { return _marks_oops_alive; }

protected:
  ReferenceProcessor&           _ref_processor;
  DiscoveredList*               _refs_lists;
  ReferenceProcessorPhaseTimes* _phase_times;
  const bool                    _marks_oops_alive;
};

// Abstract reference processing task to execute.
class AbstractRefProcTaskExecutor::EnqueueTask {
protected:
  EnqueueTask(ReferenceProcessor&           ref_processor,
              DiscoveredList                refs_lists[],
              int                           n_queues,
              ReferenceProcessorPhaseTimes* phase_times)
    : _ref_processor(ref_processor),
      _refs_lists(refs_lists),
      _n_queues(n_queues),
      _phase_times(phase_times)
  { }

public:
  virtual void work(unsigned int work_id) = 0;

protected:
  ReferenceProcessor&           _ref_processor;
  DiscoveredList*               _refs_lists;
  ReferenceProcessorPhaseTimes* _phase_times;
  int                           _n_queues;
};

#endif // SHARE_VM_GC_SHARED_REFERENCEPROCESSOR_HPP