/*
 * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_MEMORY_REFERENCEPROCESSOR_HPP
#define SHARE_VM_MEMORY_REFERENCEPROCESSOR_HPP

#include "memory/referencePolicy.hpp"
#include "oops/instanceRefKlass.hpp"

// ReferenceProcessor class encapsulates the per-"collector" processing
// of java.lang.Reference objects for GC. The interface is useful for supporting
// a generational abstraction, in particular when there are multiple
// generations that are being independently collected -- possibly
// concurrently and/or incrementally. Note, however, that the
// ReferenceProcessor class abstracts away from a generational setting
// by using only a heap interval (called "span" below), thus allowing
// its use in a straightforward manner in a general, non-generational
// setting.
//
// The basic idea is that each ReferenceProcessor object concerns
// itself with ("weak") reference processing in a specific "span"
// of the heap of interest to a specific collector. Currently,
// the span is a convex interval of the heap, but, efficiency
// apart, there seems to be no reason it couldn't be extended
// (with appropriate modifications) to any "non-convex interval".

// forward references
class ReferencePolicy;
class AbstractRefProcTaskExecutor;

// List of discovered references.
class DiscoveredList {
public:
  DiscoveredList() : _len(0), _compressed_head(0), _oop_head(NULL) { }
  oop head() const {
    return UseCompressedOops ? oopDesc::decode_heap_oop(_compressed_head) :
                               _oop_head;
  }
  HeapWord* adr_head() {
    return UseCompressedOops ? (HeapWord*)&_compressed_head :
                               (HeapWord*)&_oop_head;
  }
  void set_head(oop o) {
    if (UseCompressedOops) {
      // Must compress the head ptr.
      _compressed_head = oopDesc::encode_heap_oop(o);
    } else {
      _oop_head = o;
    }
  }
  bool   is_empty() const       { return head() == NULL; }
  size_t length()               { return _len; }
  void   set_length(size_t len) { _len = len;  }
  void   inc_length(size_t inc) { _len += inc; assert(_len > 0, "Error"); }
  void   dec_length(size_t dec) { _len -= dec; }
private:
  // Set value depending on UseCompressedOops. This could be a template class
  // but then we have to fix all the instantiations and declarations that use this class.
  oop       _oop_head;
  narrowOop _compressed_head;
  size_t    _len;
};

// Iterator for the list of discovered references.
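//
// Illustrative sketch of a typical pass over a discovered list (this
// mirrors, in simplified form, how the collector code uses the iterator;
// refs_list, keep_alive and is_alive are assumptions for the example):
//
//   DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
//   while (iter.has_next()) {
//     iter.load_ptrs(DEBUG_ONLY(false /* allow_null_referent */));
//     if (iter.is_referent_alive()) {
//       // The referent is reachable after all: unlink this Reference
//       // from the list and make sure its referent stays alive.
//       iter.remove();
//       iter.make_referent_alive();
//       iter.move_to_next();
//     } else {
//       iter.next();
//     }
//   }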
class DiscoveredListIterator {
 private:
  DiscoveredList&    _refs_list;
  HeapWord*          _prev_next;
  oop                _prev;
  oop                _ref;
  HeapWord*          _discovered_addr;
  oop                _next;
  HeapWord*          _referent_addr;
  oop                _referent;

  OopClosure*        _keep_alive;
  BoolObjectClosure* _is_alive;

  DEBUG_ONLY(
  oop                _first_seen; // cyclic linked list check
  )

  NOT_PRODUCT(
  size_t             _processed;
  size_t             _removed;
  )

 public:
  inline DiscoveredListIterator(DiscoveredList&    refs_list,
                                OopClosure*        keep_alive,
                                BoolObjectClosure* is_alive):
    _refs_list(refs_list),
    _prev_next(refs_list.adr_head()),
    _prev(NULL),
    _ref(refs_list.head()),
#ifdef ASSERT
    _first_seen(refs_list.head()),
#endif
#ifndef PRODUCT
    _processed(0),
    _removed(0),
#endif
    _next(NULL),
    _keep_alive(keep_alive),
    _is_alive(is_alive)
  { }

  // True as long as the iterator has not reached the end of the list.
  inline bool has_next() const { return _ref != NULL; }

  // Get oop to the Reference object.
  inline oop obj() const { return _ref; }

  // Get oop to the referent object.
  inline oop referent() const { return _referent; }

  // Returns true if referent is alive.
  inline bool is_referent_alive() const {
    return _is_alive->do_object_b(_referent);
  }

  // Loads data for the current reference.
  // The "allow_null_referent" argument tells us to allow for the possibility
  // of a NULL referent in the discovered Reference object. This typically
  // happens in the case of concurrent collectors that may have done the
  // discovery concurrently, or interleaved, with mutator execution.
  void load_ptrs(DEBUG_ONLY(bool allow_null_referent));

  // Move to the next discovered reference.
  inline void next() {
    _prev_next = _discovered_addr;
    _prev = _ref;
    move_to_next();
  }

  // Remove the current reference from the list.
  void remove();

  // Make the Reference object active again.
  void make_active();

  // Make the referent alive.
  inline void make_referent_alive() {
    if (UseCompressedOops) {
      _keep_alive->do_oop((narrowOop*)_referent_addr);
    } else {
      _keep_alive->do_oop((oop*)_referent_addr);
    }
  }

  // Update the discovered field.
  inline void update_discovered() {
    // First _prev_next ref actually points into DiscoveredList (gross).
    if (UseCompressedOops) {
      if (!oopDesc::is_null(*(narrowOop*)_prev_next)) {
        _keep_alive->do_oop((narrowOop*)_prev_next);
      }
    } else {
      if (!oopDesc::is_null(*(oop*)_prev_next)) {
        _keep_alive->do_oop((oop*)_prev_next);
      }
    }
  }

  // NULL out referent pointer.
  void clear_referent();

  // Statistics
  NOT_PRODUCT(
  inline size_t processed() const { return _processed; }
  inline size_t removed() const   { return _removed; }
  )

  inline void move_to_next() {
    if (_ref == _next) {
      // End of the list.
      _ref = NULL;
    } else {
      _ref = _next;
    }
    assert(_ref != _first_seen, "cyclic ref_list found");
    NOT_PRODUCT(_processed++);
  }

};

class ReferenceProcessor : public CHeapObj {
 protected:
  // Compatibility with pre-4965777 JDKs
  static bool _pending_list_uses_discovered_field;

  MemRegion   _span;                    // (right-open) interval of heap
                                        // subject to wkref discovery

  bool        _discovering_refs;        // true when discovery enabled
  bool        _discovery_is_atomic;     // if discovery is atomic wrt
                                        // other collectors in configuration
  bool        _discovery_is_mt;         // true if reference discovery is MT.

  // If true, setting "next" field of a discovered refs list requires
  // write barrier(s). (Must be true if used in a collector in which
  // elements of a discovered list may be moved during discovery: for
  // example, a collector like Garbage-First that moves objects during a
  // long-term concurrent marking phase that does weak reference
  // discovery.)
  bool        _discovered_list_needs_barrier;

  BarrierSet* _bs;                      // Cached copy of BarrierSet.
  bool        _enqueuing_is_done;       // true if all weak references enqueued
  bool        _processing_is_mt;        // true during phases when
                                        // reference processing is MT.
  int         _next_id;                 // round-robin mod _num_q counter in
                                        // support of work distribution

  // For collectors that do not keep GC liveness information
  // in the object header, this field holds a closure that
  // helps the reference processor determine the reachability
  // of an oop. It is currently initialized to NULL for all
  // collectors except for CMS and G1.
  BoolObjectClosure* _is_alive_non_header;

  // Soft ref clearing policies
  // . the default policy
  static ReferencePolicy* _default_soft_ref_policy;
  // . the "clear all" policy
  static ReferencePolicy* _always_clear_soft_ref_policy;
  // . the current policy below is either one of the above
  ReferencePolicy*        _current_soft_ref_policy;

  // The discovered ref lists themselves

  // The active MT'ness degree of the queues below
  int             _num_q;
  // The maximum MT'ness degree of the queues below
  int             _max_num_q;
  // Arrays of lists of oops, one per thread
  DiscoveredList* _discoveredSoftRefs;
  DiscoveredList* _discoveredWeakRefs;
  DiscoveredList* _discoveredFinalRefs;
  DiscoveredList* _discoveredPhantomRefs;

 public:
  static int number_of_subclasses_of_ref() { return (REF_PHANTOM - REF_OTHER); }

  int num_q()                            { return _num_q; }
  int max_num_q()                        { return _max_num_q; }
  void set_active_mt_degree(int v)       { _num_q = v; }
  DiscoveredList* discovered_soft_refs() { return _discoveredSoftRefs; }

  ReferencePolicy* setup_policy(bool always_clear) {
    _current_soft_ref_policy = always_clear ?
      _always_clear_soft_ref_policy : _default_soft_ref_policy;
    _current_soft_ref_policy->setup();   // snapshot the policy threshold
    return _current_soft_ref_policy;
  }
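
  // Illustrative sketch (clear_all_soft_refs is an assumption for the
  // example, not part of this interface): a collector typically selects
  // the soft reference clearing policy once, at the start of a cycle:
  //
  //   ReferencePolicy* soft_ref_policy = rp->setup_policy(clear_all_soft_refs);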
  // Process references with a certain reachability level.
  void process_discovered_reflist(DiscoveredList               refs_lists[],
                                  ReferencePolicy*             policy,
                                  bool                         clear_referent,
                                  BoolObjectClosure*           is_alive,
                                  OopClosure*                  keep_alive,
                                  VoidClosure*                 complete_gc,
                                  AbstractRefProcTaskExecutor* task_executor);

  void process_phaseJNI(BoolObjectClosure* is_alive,
                        OopClosure*        keep_alive,
                        VoidClosure*       complete_gc);

  // Work methods used by the method process_discovered_reflist
  // Phase1: keep alive all those referents that are otherwise
  // dead but which must be kept alive by policy (and their closure).
  void process_phase1(DiscoveredList&    refs_list,
                      ReferencePolicy*   policy,
                      BoolObjectClosure* is_alive,
                      OopClosure*        keep_alive,
                      VoidClosure*       complete_gc);
  // Phase2: remove all those references whose referents are
  // reachable.
  inline void process_phase2(DiscoveredList&    refs_list,
                             BoolObjectClosure* is_alive,
                             OopClosure*        keep_alive,
                             VoidClosure*       complete_gc) {
    if (discovery_is_atomic()) {
      // complete_gc is ignored in this case for this phase
      pp2_work(refs_list, is_alive, keep_alive);
    } else {
      assert(complete_gc != NULL, "Error");
      pp2_work_concurrent_discovery(refs_list, is_alive,
                                    keep_alive, complete_gc);
    }
  }
  // Work methods in support of process_phase2
  void pp2_work(DiscoveredList&    refs_list,
                BoolObjectClosure* is_alive,
                OopClosure*        keep_alive);
  void pp2_work_concurrent_discovery(
                DiscoveredList&    refs_list,
                BoolObjectClosure* is_alive,
                OopClosure*        keep_alive,
                VoidClosure*       complete_gc);
  // Phase3: process the referents by either clearing them
  // or keeping them alive (and their closure)
  void process_phase3(DiscoveredList&    refs_list,
                      bool               clear_referent,
                      BoolObjectClosure* is_alive,
                      OopClosure*        keep_alive,
                      VoidClosure*       complete_gc);
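
  // Illustrative sketch (a simplification of what process_discovered_reflist
  // actually does) of how the phases compose for a single list, assuming
  // serial processing:
  //
  //   if (policy != NULL) {  // only soft references come with a policy
  //     process_phase1(refs_list, policy, is_alive, keep_alive, complete_gc);
  //   }
  //   process_phase2(refs_list, is_alive, keep_alive, complete_gc);
  //   process_phase3(refs_list, clear_referent, is_alive, keep_alive, complete_gc);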

  // Enqueue references with a certain reachability level
  void enqueue_discovered_reflist(DiscoveredList& refs_list, HeapWord* pending_list_addr);

  // "Preclean" all the discovered reference lists
  // by removing references with strongly reachable referents.
  // The first argument is a predicate on an oop that indicates
  // its (strong) reachability and the second is a closure that
  // may be used to incrementalize or abort the precleaning process.
  // The caller is responsible for taking care of potential
  // interference with concurrent operations on these lists
  // (or predicates involved) by other threads. Currently
  // only used by the CMS collector. The should_unload_classes
  // argument is used to aid assertion checking when classes are collected.
  void preclean_discovered_references(BoolObjectClosure* is_alive,
                                      OopClosure*        keep_alive,
                                      VoidClosure*       complete_gc,
                                      YieldClosure*      yield,
                                      bool               should_unload_classes);

  // Delete entries in the discovered lists that have
  // either a null referent or are not active. Such
  // Reference objects can result from the clearing
  // or enqueueing of Reference objects concurrent
  // with their discovery by a (concurrent) collector.
  // For a definition of "active" see java.lang.ref.Reference;
  // Refs are born active, become inactive when enqueued,
  // and never become active again. The state of being
  // active is encoded as follows: A Ref is active
  // if and only if its "next" field is NULL.
  void clean_up_discovered_references();
  void clean_up_discovered_reflist(DiscoveredList& refs_list);

  // Returns the name of the discovered reference list
  // occupying the i / _num_q slot.
  const char* list_name(int i);

  void enqueue_discovered_reflists(HeapWord* pending_list_addr, AbstractRefProcTaskExecutor* task_executor);

 protected:
  // Set the 'discovered' field of the given reference to
  // the given value - emitting barriers depending upon
  // the value of _discovered_list_needs_barrier.
  void set_discovered(oop ref, oop value);

  // "Preclean" the given discovered reference list
  // by removing references with strongly reachable referents.
  // Currently used in support of CMS only.
  void preclean_discovered_reflist(DiscoveredList&    refs_list,
                                   BoolObjectClosure* is_alive,
                                   OopClosure*        keep_alive,
                                   VoidClosure*       complete_gc,
                                   YieldClosure*      yield);

  // round-robin mod _num_q (note: _not_ mod _max_num_q)
  int next_id() {
    int id = _next_id;
    if (++_next_id == _num_q) {
      _next_id = 0;
    }
    return id;
  }
  DiscoveredList* get_discovered_list(ReferenceType rt);
  inline void add_to_discovered_list_mt(DiscoveredList& refs_list, oop obj,
                                        HeapWord* discovered_addr);
  void verify_ok_to_handle_reflists() PRODUCT_RETURN;

  void clear_discovered_references(DiscoveredList& refs_list);
  void abandon_partial_discovered_list(DiscoveredList& refs_list);

  // Calculate the number of JNI handles.
  unsigned int count_jni_refs();

  // Balances reference queues.
  void balance_queues(DiscoveredList ref_lists[]);

  // Update (advance) the soft ref master clock field.
  void update_soft_ref_master_clock();

 public:
  // constructor
  ReferenceProcessor():
    _span((HeapWord*)NULL, (HeapWord*)NULL),
    _discoveredSoftRefs(NULL),  _discoveredWeakRefs(NULL),
    _discoveredFinalRefs(NULL), _discoveredPhantomRefs(NULL),
    _discovering_refs(false),
    _discovery_is_atomic(true),
    _enqueuing_is_done(false),
    _discovery_is_mt(false),
    _discovered_list_needs_barrier(false),
    _bs(NULL),
    _is_alive_non_header(NULL),
    _num_q(0),
    _max_num_q(0),
    _processing_is_mt(false),
    _next_id(0)
  { }

  // Default parameters give you a vanilla reference processor.
  ReferenceProcessor(MemRegion span,
                     bool mt_processing = false, int mt_processing_degree = 1,
                     bool mt_discovery  = false, int mt_discovery_degree  = 1,
                     bool atomic_discovery = true,
                     BoolObjectClosure* is_alive_non_header = NULL,
                     bool discovered_list_needs_barrier = false);
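
  // Illustrative sketch (the field and flag names are assumptions for the
  // example, not prescribed by this interface): a parallel collector might
  // construct its reference processor roughly as follows:
  //
  //   _ref_processor =
  //     new ReferenceProcessor(_reserved,                 // span
  //                            ParallelRefProcEnabled && (ParallelGCThreads > 1),
  //                            (int) ParallelGCThreads,   // mt processing degree
  //                            ParallelGCThreads > 1,     // mt discovery
  //                            (int) ParallelGCThreads,   // mt discovery degree
  //                            true,                      // atomic discovery
  //                            NULL,                      // is_alive_non_header
  //                            false);                    // no barrier needed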

  // RefDiscoveryPolicy values
  enum DiscoveryPolicy {
    ReferenceBasedDiscovery = 0,
    ReferentBasedDiscovery  = 1,
    DiscoveryPolicyMin      = ReferenceBasedDiscovery,
    DiscoveryPolicyMax      = ReferentBasedDiscovery
  };

  static void init_statics();

 public:
  // get and set "is_alive_non_header" field
  BoolObjectClosure* is_alive_non_header() {
    return _is_alive_non_header;
  }
  void set_is_alive_non_header(BoolObjectClosure* is_alive_non_header) {
    _is_alive_non_header = is_alive_non_header;
  }

  // get and set span
  MemRegion span()                   { return _span; }
  void      set_span(MemRegion span) { _span = span; }

  // start and stop weak ref discovery
  void enable_discovery(bool verify_disabled, bool check_no_refs) {
#ifdef ASSERT
    // Verify that we're not currently discovering refs
    assert(!verify_disabled || !_discovering_refs, "nested call?");

    if (check_no_refs) {
      // Verify that the discovered lists are empty
      verify_no_references_recorded();
    }
#endif // ASSERT
    _discovering_refs = true;
  }

  void disable_discovery() { _discovering_refs = false; }
  bool discovery_enabled() { return _discovering_refs; }

  // whether discovery is atomic wrt other collectors
  bool discovery_is_atomic() const { return _discovery_is_atomic; }
  void set_atomic_discovery(bool atomic) { _discovery_is_atomic = atomic; }

  // whether the JDK in which we are embedded is a pre-4965777 JDK,
  // and thus whether or not it uses the discovered field to chain
  // the entries in the pending list.
  static bool pending_list_uses_discovered_field() {
    return _pending_list_uses_discovered_field;
  }

  // whether discovery is done by multiple threads simultaneously
  bool discovery_is_mt() const { return _discovery_is_mt; }
  void set_mt_discovery(bool mt) { _discovery_is_mt = mt; }

  // Whether we are in a phase when _processing_ is MT.
  bool processing_is_mt() const { return _processing_is_mt; }
  void set_mt_processing(bool mt) { _processing_is_mt = mt; }

  // whether all enqueuing of weak references is complete
  bool enqueuing_is_done() { return _enqueuing_is_done; }
  void set_enqueuing_is_done(bool v) { _enqueuing_is_done = v; }

  // iterate over oops
  void weak_oops_do(OopClosure* f);       // weak roots

  // Balance each of the discovered lists.
  void balance_all_queues();

  // Discover a Reference object, using appropriate discovery criteria
  bool discover_reference(oop obj, ReferenceType rt);

  // Process references found during GC (called by the garbage collector)
  void process_discovered_references(BoolObjectClosure*           is_alive,
                                     OopClosure*                  keep_alive,
                                     VoidClosure*                 complete_gc,
                                     AbstractRefProcTaskExecutor* task_executor);

 public:
  // Enqueue references at end of GC (called by the garbage collector)
  bool enqueue_discovered_references(AbstractRefProcTaskExecutor* task_executor = NULL);
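
  // Illustrative sketch of the usual order of operations over a collection
  // cycle (the closure names are assumptions for the example; the exact
  // sequence varies by collector):
  //
  //   rp->enable_discovery(true /* verify_disabled */, true /* check_no_refs */);
  //   // ... trace the heap; the tracing code calls
  //   //     rp->discover_reference(obj, rt) for Reference objects it finds ...
  //   rp->process_discovered_references(&is_alive, &keep_alive, &complete_gc, NULL);
  //   rp->enqueue_discovered_references();  // chain survivors onto the pending list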

  // If a discovery that is in progress is being superseded, abandon it: all
  // the discovered lists will be empty, and all the objects on them will
  // have NULL discovered fields. Must be called only at a safepoint.
  void abandon_partial_discovery();

  // debugging
  void verify_no_references_recorded() PRODUCT_RETURN;
  void verify_referent(oop obj)        PRODUCT_RETURN;

  // clear the discovered lists (unlinking each entry).
  void clear_discovered_references() PRODUCT_RETURN;
};

// A utility class to disable reference discovery in
// the scope which contains it, for the given ReferenceProcessor.
class NoRefDiscovery: StackObj {
 private:
  ReferenceProcessor* _rp;
  bool _was_discovering_refs;
 public:
  NoRefDiscovery(ReferenceProcessor* rp) : _rp(rp) {
    _was_discovering_refs = _rp->discovery_enabled();
    if (_was_discovering_refs) {
      _rp->disable_discovery();
    }
  }

  ~NoRefDiscovery() {
    if (_was_discovering_refs) {
      _rp->enable_discovery(true /*verify_disabled*/, false /*check_no_refs*/);
    }
  }
};


// A utility class to temporarily mutate the span of the
// given ReferenceProcessor in the scope that contains it.
class ReferenceProcessorSpanMutator: StackObj {
 private:
  ReferenceProcessor* _rp;
  MemRegion           _saved_span;

 public:
  ReferenceProcessorSpanMutator(ReferenceProcessor* rp,
                                MemRegion span):
    _rp(rp) {
    _saved_span = _rp->span();
    _rp->set_span(span);
  }

  ~ReferenceProcessorSpanMutator() {
    _rp->set_span(_saved_span);
  }
};

// A utility class to temporarily change the MT'ness of
// reference discovery for the given ReferenceProcessor
// in the scope that contains it.
class ReferenceProcessorMTDiscoveryMutator: StackObj {
 private:
  ReferenceProcessor* _rp;
  bool                _saved_mt;

 public:
  ReferenceProcessorMTDiscoveryMutator(ReferenceProcessor* rp,
                                       bool mt):
    _rp(rp) {
    _saved_mt = _rp->discovery_is_mt();
    _rp->set_mt_discovery(mt);
  }

  ~ReferenceProcessorMTDiscoveryMutator() {
    _rp->set_mt_discovery(_saved_mt);
  }
};


// A utility class to temporarily change the disposition
// of the "is_alive_non_header" closure field of the
// given ReferenceProcessor in the scope that contains it.
class ReferenceProcessorIsAliveMutator: StackObj {
 private:
  ReferenceProcessor* _rp;
  BoolObjectClosure*  _saved_cl;

 public:
  ReferenceProcessorIsAliveMutator(ReferenceProcessor* rp,
                                   BoolObjectClosure*  cl):
    _rp(rp) {
    _saved_cl = _rp->is_alive_non_header();
    _rp->set_is_alive_non_header(cl);
  }

  ~ReferenceProcessorIsAliveMutator() {
    _rp->set_is_alive_non_header(_saved_cl);
  }
};

// A utility class to temporarily change the disposition
// of the "discovery_is_atomic" field of the
// given ReferenceProcessor in the scope that contains it.
class ReferenceProcessorAtomicMutator: StackObj {
 private:
  ReferenceProcessor* _rp;
  bool                _saved_atomic_discovery;

 public:
  ReferenceProcessorAtomicMutator(ReferenceProcessor* rp,
                                  bool atomic):
    _rp(rp) {
    _saved_atomic_discovery = _rp->discovery_is_atomic();
    _rp->set_atomic_discovery(atomic);
  }

  ~ReferenceProcessorAtomicMutator() {
    _rp->set_atomic_discovery(_saved_atomic_discovery);
  }
};
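
// Illustrative sketch of the scoped-mutator pattern used by the utility
// classes above and below (ref_processor and scavenged_span are
// assumptions for the example):
//
//   {
//     ReferenceProcessorSpanMutator rp_mut(ref_processor, scavenged_span);
//     // ... collect; discovery is confined to scavenged_span here ...
//   }  // destructor restores the original span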

// A utility class to temporarily change the MT processing
// disposition of the given ReferenceProcessor instance
// in the scope that contains it.
class ReferenceProcessorMTProcMutator: StackObj {
 private:
  ReferenceProcessor* _rp;
  bool                _saved_mt;

 public:
  ReferenceProcessorMTProcMutator(ReferenceProcessor* rp,
                                  bool mt):
    _rp(rp) {
    _saved_mt = _rp->processing_is_mt();
    _rp->set_mt_processing(mt);
  }

  ~ReferenceProcessorMTProcMutator() {
    _rp->set_mt_processing(_saved_mt);
  }
};


// This class is an interface used to implement task execution for the
// reference processing.
class AbstractRefProcTaskExecutor {
public:

  // Abstract tasks to execute.
  class ProcessTask;
  class EnqueueTask;

  // Executes a task using worker threads.
  virtual void execute(ProcessTask& task) = 0;
  virtual void execute(EnqueueTask& task) = 0;

  // Switch to single threaded mode.
  virtual void set_single_threaded_mode() { }
};

// Abstract reference processing task to execute.
class AbstractRefProcTaskExecutor::ProcessTask {
protected:
  ProcessTask(ReferenceProcessor& ref_processor,
              DiscoveredList      refs_lists[],
              bool                marks_oops_alive)
    : _ref_processor(ref_processor),
      _refs_lists(refs_lists),
      _marks_oops_alive(marks_oops_alive)
  { }

public:
  virtual void work(unsigned int work_id, BoolObjectClosure& is_alive,
                    OopClosure& keep_alive,
                    VoidClosure& complete_gc) = 0;

  // Returns true if a task marks some oops as alive.
  bool marks_oops_alive() const
  { return _marks_oops_alive; }

protected:
  ReferenceProcessor& _ref_processor;
  DiscoveredList*     _refs_lists;
  const bool          _marks_oops_alive;
};

// Abstract reference processing task to execute.
class AbstractRefProcTaskExecutor::EnqueueTask {
protected:
  EnqueueTask(ReferenceProcessor& ref_processor,
              DiscoveredList      refs_lists[],
              HeapWord*           pending_list_addr,
              int                 n_queues)
    : _ref_processor(ref_processor),
      _refs_lists(refs_lists),
      _pending_list_addr(pending_list_addr),
      _n_queues(n_queues)
  { }

public:
  virtual void work(unsigned int work_id) = 0;

protected:
  ReferenceProcessor& _ref_processor;
  DiscoveredList*     _refs_lists;
  HeapWord*           _pending_list_addr;
  int                 _n_queues;
};
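
// Illustrative sketch (the class name is an assumption, not part of this
// interface): a parallel collector implements the executor interface by
// wrapping each phase in a ProcessTask subclass whose work() method
// processes one DiscoveredList per worker, e.g.
//
//   class MyRefProcTask : public AbstractRefProcTaskExecutor::ProcessTask {
//    public:
//     virtual void work(unsigned int work_id, BoolObjectClosure& is_alive,
//                       OopClosure& keep_alive, VoidClosure& complete_gc) {
//       // process _refs_lists[work_id] using the supplied closures
//     }
//   };

#endif // SHARE_VM_MEMORY_REFERENCEPROCESSOR_HPP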