/*
 * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_MEMORY_REFERENCEPROCESSOR_HPP
#define SHARE_VM_MEMORY_REFERENCEPROCESSOR_HPP

#include "memory/referencePolicy.hpp"
#include "memory/referenceProcessorStats.hpp"
#include "memory/referenceType.hpp"
#include "oops/instanceRefKlass.hpp"

class GCTimer;

// The ReferenceProcessor class encapsulates the per-"collector" processing
// of java.lang.Reference objects for GC. The interface is useful for supporting
// a generational abstraction, in particular when there are multiple
// generations that are being independently collected -- possibly
// concurrently and/or incrementally. Note, however, that the
// ReferenceProcessor class abstracts away from a generational setting
// by using only a heap interval (called "span" below), thus allowing
// its use in a straightforward manner in a general, non-generational
// setting.
//
// The basic idea is that each ReferenceProcessor object concerns
// itself with ("weak") reference processing in a specific "span"
// of the heap of interest to a specific collector. Currently,
// the span is a convex interval of the heap, but, efficiency
// aside, there seems to be no reason it couldn't be extended
// (with appropriate modifications) to any "non-convex interval".
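//
// Illustrative sketch (hypothetical caller code, for exposition only; the
// constructor itself is declared further down in this file). A collector
// covering a span "gen_span" (an assumed MemRegion) might create its
// processor like this:
//
//   ReferenceProcessor* rp =
//     new ReferenceProcessor(gen_span,
//                            true, ParallelGCThreads,  // MT processing
//                            true, ParallelGCThreads,  // MT discovery
//                            true,                     // atomic discovery
//                            NULL,                     // is_alive_non_header
//                            false);                   // no discovered-list barrier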

// forward references
class ReferencePolicy;
class AbstractRefProcTaskExecutor;

// List of discovered references.
class DiscoveredList {
public:
  DiscoveredList() : _len(0), _compressed_head(0), _oop_head(NULL) { }
  oop head() const {
    return UseCompressedOops ? oopDesc::decode_heap_oop(_compressed_head) :
                               _oop_head;
  }
  HeapWord* adr_head() {
    return UseCompressedOops ? (HeapWord*)&_compressed_head :
                               (HeapWord*)&_oop_head;
  }
  void set_head(oop o) {
    if (UseCompressedOops) {
      // Must compress the head ptr.
      _compressed_head = oopDesc::encode_heap_oop(o);
    } else {
      _oop_head = o;
    }
  }
  bool is_empty() const { return head() == NULL; }
  size_t length() { return _len; }
  void set_length(size_t len) { _len = len; }
  void inc_length(size_t inc) { _len += inc; assert(_len > 0, "Error"); }
  void dec_length(size_t dec) { _len -= dec; }
private:
  // Set value depending on UseCompressedOops. This could be a template class
  // but then we have to fix all the instantiations and declarations that use this class.
  oop       _oop_head;
  narrowOop _compressed_head;
  size_t    _len;
};
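
// Illustrative sketch (pseudocode only, for exposition): discovery code
// pushes a newly discovered Reference onto the front of one of these
// lists, roughly
//
//   // obj's discovered field is first set to the old head, then:
//   list.set_head(obj);
//   list.inc_length(1);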

// Iterator for the list of discovered references.
class DiscoveredListIterator {
private:
  DiscoveredList&    _refs_list;
  HeapWord*          _prev_next;
  oop                _prev;
  oop                _ref;
  HeapWord*          _discovered_addr;
  oop                _next;
  HeapWord*          _referent_addr;
  oop                _referent;
  OopClosure*        _keep_alive;
  BoolObjectClosure* _is_alive;

  DEBUG_ONLY(
  oop                _first_seen; // cyclic linked list check
  )

  NOT_PRODUCT(
  size_t             _processed;
  size_t             _removed;
  )

public:
  inline DiscoveredListIterator(DiscoveredList&    refs_list,
                                OopClosure*        keep_alive,
                                BoolObjectClosure* is_alive):
    _refs_list(refs_list),
    _prev_next(refs_list.adr_head()),
    _prev(NULL),
    _ref(refs_list.head()),
#ifdef ASSERT
    _first_seen(refs_list.head()),
#endif
#ifndef PRODUCT
    _processed(0),
    _removed(0),
#endif
    _next(NULL),
    _keep_alive(keep_alive),
    _is_alive(is_alive)
  { }

  // End Of List.
  inline bool has_next() const { return _ref != NULL; }

  // Get oop to the Reference object.
  inline oop obj() const { return _ref; }

  // Get oop to the referent object.
  inline oop referent() const { return _referent; }

  // Returns true if referent is alive.
  inline bool is_referent_alive() const {
    return _is_alive->do_object_b(_referent);
  }

  // Loads data for the current reference.
  // The "allow_null_referent" argument tells us to allow for the possibility
  // of a NULL referent in the discovered Reference object. This typically
  // happens in the case of concurrent collectors that may have done the
  // discovery concurrently, or interleaved, with mutator execution.
  void load_ptrs(DEBUG_ONLY(bool allow_null_referent));

  // Move to the next discovered reference.
  inline void next() {
    _prev_next = _discovered_addr;
    _prev = _ref;
    move_to_next();
  }

  // Remove the current reference from the list.
  void remove();

  // Make the Reference object active again.
  void make_active();

  // Make the referent alive.
  inline void make_referent_alive() {
    if (UseCompressedOops) {
      _keep_alive->do_oop((narrowOop*)_referent_addr);
    } else {
      _keep_alive->do_oop((oop*)_referent_addr);
    }
  }

  // Update the discovered field.
  inline void update_discovered() {
    // First _prev_next ref actually points into DiscoveredList (gross).
    if (UseCompressedOops) {
      if (!oopDesc::is_null(*(narrowOop*)_prev_next)) {
        _keep_alive->do_oop((narrowOop*)_prev_next);
      }
    } else {
      if (!oopDesc::is_null(*(oop*)_prev_next)) {
        _keep_alive->do_oop((oop*)_prev_next);
      }
    }
  }

  // NULL out referent pointer.
  void clear_referent();

  // Statistics
  NOT_PRODUCT(
  inline size_t processed() const { return _processed; }
  inline size_t removed() const   { return _removed; }
  )

  inline void move_to_next() {
    if (_ref == _next) {
      // End of the list.
      _ref = NULL;
    } else {
      _ref = _next;
    }
    assert(_ref != _first_seen, "cyclic ref_list found");
    NOT_PRODUCT(_processed++);
  }
};
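
// Illustrative traversal sketch (hypothetical, for exposition only; the
// companion .cpp file is the authoritative user of this iterator). A
// typical walk over a DiscoveredList looks roughly like:
//
//   DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
//   while (iter.has_next()) {
//     iter.load_ptrs(DEBUG_ONLY(false /* allow_null_referent */));
//     if (iter.is_referent_alive()) {
//       // Referent is reachable: unlink the entry and keep it alive.
//       iter.remove();
//       iter.make_referent_alive();
//       iter.move_to_next();
//     } else {
//       iter.next();
//     }
//   }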

class ReferenceProcessor : public CHeapObj<mtGC> {

 private:
  size_t total_count(DiscoveredList lists[]);

 protected:
  // Compatibility with pre-4965777 JDKs
  static bool _pending_list_uses_discovered_field;

  // The SoftReference master timestamp clock
  static jlong _soft_ref_timestamp_clock;

  MemRegion   _span;                 // (right-open) interval of heap
                                     // subject to weak ref discovery

  bool        _discovering_refs;     // true when discovery enabled
  bool        _discovery_is_atomic;  // if discovery is atomic wrt
                                     // other collectors in configuration
  bool        _discovery_is_mt;      // true if reference discovery is MT.

  // If true, setting "next" field of a discovered refs list requires
  // write barrier(s). (Must be true if used in a collector in which
  // elements of a discovered list may be moved during discovery: for
  // example, a collector like Garbage-First that moves objects during a
  // long-term concurrent marking phase that does weak reference
  // discovery.)
  bool        _discovered_list_needs_barrier;

  bool        _enqueuing_is_done;    // true if all weak references enqueued
  bool        _processing_is_mt;     // true during phases when
                                     // reference processing is MT.
  uint        _next_id;              // round-robin mod _num_q counter in
                                     // support of work distribution

  // For collectors that do not keep GC liveness information
  // in the object header, this field holds a closure that
  // helps the reference processor determine the reachability
  // of an oop. It is currently initialized to NULL for all
  // collectors except for CMS and G1.
  BoolObjectClosure* _is_alive_non_header;

  // Soft ref clearing policies
  // . the default policy
  static ReferencePolicy*   _default_soft_ref_policy;
  // . the "clear all" policy
  static ReferencePolicy*   _always_clear_soft_ref_policy;
  // . the current policy below is either one of the above
  ReferencePolicy*          _current_soft_ref_policy;

  // The discovered ref lists themselves

  // The active MT'ness degree of the queues below
  uint            _num_q;
  // The maximum MT'ness degree of the queues below
  uint            _max_num_q;

  // Master array of discovered oops
  DiscoveredList* _discovered_refs;

  // Arrays of lists of oops, one per thread (pointers into master array above)
  DiscoveredList* _discoveredSoftRefs;
  DiscoveredList* _discoveredWeakRefs;
  DiscoveredList* _discoveredFinalRefs;
  DiscoveredList* _discoveredPhantomRefs;

 public:
  static int number_of_subclasses_of_ref() { return (REF_PHANTOM - REF_OTHER); }

  uint num_q()                      { return _num_q; }
  uint max_num_q()                  { return _max_num_q; }
  void set_active_mt_degree(uint v) { _num_q = v; }

  DiscoveredList* discovered_refs() { return _discovered_refs; }

  ReferencePolicy* setup_policy(bool always_clear) {
    _current_soft_ref_policy = always_clear ?
      _always_clear_soft_ref_policy : _default_soft_ref_policy;
    _current_soft_ref_policy->setup();   // snapshot the policy threshold
    return _current_soft_ref_policy;
  }

  // Process references with a certain reachability level.
  size_t process_discovered_reflist(DiscoveredList               refs_lists[],
                                    ReferencePolicy*             policy,
                                    bool                         clear_referent,
                                    BoolObjectClosure*           is_alive,
                                    OopClosure*                  keep_alive,
                                    VoidClosure*                 complete_gc,
                                    AbstractRefProcTaskExecutor* task_executor);

  void process_phaseJNI(BoolObjectClosure* is_alive,
                        OopClosure*        keep_alive,
                        VoidClosure*       complete_gc);

  // Work methods used by the method process_discovered_reflist
  // Phase1: keep alive all those referents that are otherwise
  // dead but which must be kept alive by policy (and their closure).
  void process_phase1(DiscoveredList&    refs_list,
                      ReferencePolicy*   policy,
                      BoolObjectClosure* is_alive,
                      OopClosure*        keep_alive,
                      VoidClosure*       complete_gc);
  // Phase2: remove all those references whose referents are
  // reachable.
  inline void process_phase2(DiscoveredList&    refs_list,
                             BoolObjectClosure* is_alive,
                             OopClosure*        keep_alive,
                             VoidClosure*       complete_gc) {
    if (discovery_is_atomic()) {
      // complete_gc is ignored in this case for this phase
      pp2_work(refs_list, is_alive, keep_alive);
    } else {
      assert(complete_gc != NULL, "Error");
      pp2_work_concurrent_discovery(refs_list, is_alive,
                                    keep_alive, complete_gc);
    }
  }
  // Work methods in support of process_phase2
  void pp2_work(DiscoveredList&    refs_list,
                BoolObjectClosure* is_alive,
                OopClosure*        keep_alive);
  void pp2_work_concurrent_discovery(
                DiscoveredList&    refs_list,
                BoolObjectClosure* is_alive,
                OopClosure*        keep_alive,
                VoidClosure*       complete_gc);
  // Phase3: process the referents by either clearing them
  // or keeping them alive (and their closure)
  void process_phase3(DiscoveredList&    refs_list,
                      bool               clear_referent,
                      BoolObjectClosure* is_alive,
                      OopClosure*        keep_alive,
                      VoidClosure*       complete_gc);
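
  // Putting the phases together (illustrative pseudocode only; the real
  // sequencing lives in the companion .cpp file and additionally balances
  // queues and dispatches to an AbstractRefProcTaskExecutor when
  // processing is MT):
  //
  //   if (policy != NULL) {   // a policy is supplied for SoftRefs only
  //     process_phase1(list, policy, is_alive, keep_alive, complete_gc);
  //   }
  //   process_phase2(list, is_alive, keep_alive, complete_gc);
  //   process_phase3(list, clear_referent, is_alive, keep_alive, complete_gc);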

  // Enqueue references with a certain reachability level
  void enqueue_discovered_reflist(DiscoveredList& refs_list, HeapWord* pending_list_addr);

  // "Preclean" all the discovered reference lists
  // by removing references with strongly reachable referents.
  // The first argument is a predicate on an oop that indicates
  // its (strong) reachability and the second is a closure that
  // may be used to incrementalize or abort the precleaning process.
  // The caller is responsible for taking care of potential
  // interference with concurrent operations on these lists
  // (or predicates involved) by other threads. Currently
  // only used by the CMS collector.
  void preclean_discovered_references(BoolObjectClosure* is_alive,
                                      OopClosure*        keep_alive,
                                      VoidClosure*       complete_gc,
                                      YieldClosure*      yield,
                                      GCTimer*           gc_timer);

  // Delete entries in the discovered lists that have
  // either a null referent or are not active. Such
  // Reference objects can result from the clearing
  // or enqueueing of Reference objects concurrent
  // with their discovery by a (concurrent) collector.
  // For a definition of "active" see java.lang.ref.Reference;
  // Refs are born active, become inactive when enqueued,
  // and never become active again. The state of being
  // active is encoded as follows: A Ref is active
  // if and only if its "next" field is NULL.
  void clean_up_discovered_references();
  void clean_up_discovered_reflist(DiscoveredList& refs_list);

  // Returns the name of the discovered reference list
  // occupying the i / _num_q slot.
  const char* list_name(uint i);

  void enqueue_discovered_reflists(HeapWord* pending_list_addr, AbstractRefProcTaskExecutor* task_executor);

 protected:
  // Set the 'discovered' field of the given reference to
  // the given value - emitting barriers depending upon
  // the value of _discovered_list_needs_barrier.
  void set_discovered(oop ref, oop value);

  // "Preclean" the given discovered reference list
  // by removing references with strongly reachable referents.
  // Currently used in support of CMS only.
  void preclean_discovered_reflist(DiscoveredList&    refs_list,
                                   BoolObjectClosure* is_alive,
                                   OopClosure*        keep_alive,
                                   VoidClosure*       complete_gc,
                                   YieldClosure*      yield);

  // round-robin mod _num_q (note: _not_ mod _max_num_q)
  uint next_id() {
    uint id = _next_id;
    if (++_next_id == _num_q) {
      _next_id = 0;
    }
    return id;
  }
  DiscoveredList* get_discovered_list(ReferenceType rt);
  inline void add_to_discovered_list_mt(DiscoveredList& refs_list, oop obj,
                                        HeapWord* discovered_addr);
  void verify_ok_to_handle_reflists() PRODUCT_RETURN;

  void clear_discovered_references(DiscoveredList& refs_list);
  void abandon_partial_discovered_list(DiscoveredList& refs_list);

  // Calculate the number of JNI handles.
  unsigned int count_jni_refs();

  // Balances reference queues.
  void balance_queues(DiscoveredList ref_lists[]);

  // Update (advance) the soft ref master clock field.
  void update_soft_ref_master_clock();

 public:
  // Default parameters give you a vanilla reference processor.
  ReferenceProcessor(MemRegion span,
                     bool mt_processing = false, uint mt_processing_degree = 1,
                     bool mt_discovery  = false, uint mt_discovery_degree  = 1,
                     bool atomic_discovery = true,
                     BoolObjectClosure* is_alive_non_header = NULL,
                     bool discovered_list_needs_barrier = false);

  // RefDiscoveryPolicy values
  enum DiscoveryPolicy {
    ReferenceBasedDiscovery = 0,
    ReferentBasedDiscovery  = 1,
    DiscoveryPolicyMin      = ReferenceBasedDiscovery,
    DiscoveryPolicyMax      = ReferentBasedDiscovery
  };

  static void init_statics();

 public:
  // get and set "is_alive_non_header" field
  BoolObjectClosure* is_alive_non_header() {
    return _is_alive_non_header;
  }
  void set_is_alive_non_header(BoolObjectClosure* is_alive_non_header) {
    _is_alive_non_header = is_alive_non_header;
  }

  // get and set span
  MemRegion span()                   { return _span; }
  void      set_span(MemRegion span) { _span = span; }

  // start and stop weak ref discovery
  void enable_discovery(bool verify_disabled, bool check_no_refs);
  void disable_discovery() { _discovering_refs = false; }
  bool discovery_enabled() { return _discovering_refs; }

  // whether discovery is atomic wrt other collectors
  bool discovery_is_atomic() const { return _discovery_is_atomic; }
  void set_atomic_discovery(bool atomic) { _discovery_is_atomic = atomic; }

  // whether the JDK in which we are embedded is a pre-4965777 JDK,
  // and thus whether or not it uses the discovered field to chain
  // the entries in the pending list.
  static bool pending_list_uses_discovered_field() {
    return _pending_list_uses_discovered_field;
  }

  // whether discovery is done by multiple threads simultaneously
  bool discovery_is_mt() const { return _discovery_is_mt; }
  void set_mt_discovery(bool mt) { _discovery_is_mt = mt; }

  // Whether we are in a phase when _processing_ is MT.
  bool processing_is_mt() const { return _processing_is_mt; }
  void set_mt_processing(bool mt) { _processing_is_mt = mt; }

  // whether all enqueueing of weak references is complete
  bool enqueuing_is_done() { return _enqueuing_is_done; }
  void set_enqueuing_is_done(bool v) { _enqueuing_is_done = v; }

  // iterate over oops
  void weak_oops_do(OopClosure* f);   // weak roots

  // Balance each of the discovered lists.
  void balance_all_queues();
  void verify_list(DiscoveredList& ref_list);

  // Discover a Reference object, using appropriate discovery criteria
  bool discover_reference(oop obj, ReferenceType rt);

  // Process references found during GC (called by the garbage collector)
  ReferenceProcessorStats
  process_discovered_references(BoolObjectClosure*           is_alive,
                                OopClosure*                  keep_alive,
                                VoidClosure*                 complete_gc,
                                AbstractRefProcTaskExecutor* task_executor,
                                GCTimer*                     gc_timer);

  // Enqueue references at end of GC (called by the garbage collector)
  bool enqueue_discovered_references(AbstractRefProcTaskExecutor* task_executor = NULL);

  // If a discovery is in progress that is being superseded, abandon it: all
  // the discovered lists will be empty, and all the objects on them will
  // have NULL discovered fields. Must be called only at a safepoint.
  void abandon_partial_discovery();

  // debugging
  void verify_no_references_recorded() PRODUCT_RETURN;
  void verify_referent(oop obj)        PRODUCT_RETURN;

  // clear the discovered lists (unlinking each entry).
  void clear_discovered_references() PRODUCT_RETURN;
};
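
// Illustrative GC-side usage sketch (hypothetical caller, simplified; the
// closures and the gc_timer are assumed to be supplied by the collector):
//
//   rp->enable_discovery(true /*verify_disabled*/, false /*check_no_refs*/);
//   // ... during marking, the collector calls
//   //     rp->discover_reference(obj, ref_type)
//   // for each Reference it encounters ...
//   ReferenceProcessorStats stats =
//     rp->process_discovered_references(&is_alive, &keep_alive,
//                                       &complete_gc, NULL /* serial */,
//                                       gc_timer);
//   rp->enqueue_discovered_references();   // chain onto the pending list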

// A utility class to disable reference discovery in
// the scope that contains it, for the given ReferenceProcessor.
class NoRefDiscovery: StackObj {
 private:
  ReferenceProcessor* _rp;
  bool _was_discovering_refs;
 public:
  NoRefDiscovery(ReferenceProcessor* rp) : _rp(rp) {
    _was_discovering_refs = _rp->discovery_enabled();
    if (_was_discovering_refs) {
      _rp->disable_discovery();
    }
  }

  ~NoRefDiscovery() {
    if (_was_discovering_refs) {
      _rp->enable_discovery(true /*verify_disabled*/, false /*check_no_refs*/);
    }
  }
};


// A utility class to temporarily mutate the span of the
// given ReferenceProcessor in the scope that contains it.
class ReferenceProcessorSpanMutator: StackObj {
 private:
  ReferenceProcessor* _rp;
  MemRegion           _saved_span;

 public:
  ReferenceProcessorSpanMutator(ReferenceProcessor* rp,
                                MemRegion span):
    _rp(rp) {
    _saved_span = _rp->span();
    _rp->set_span(span);
  }

  ~ReferenceProcessorSpanMutator() {
    _rp->set_span(_saved_span);
  }
};
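
// Usage sketch (hypothetical caller, for exposition only): these StackObj
// helpers follow the usual HotSpot RAII pattern, e.g.
//
//   {
//     NoRefDiscovery no_discovery(rp);                    // discovery off
//     ReferenceProcessorSpanMutator span_x(rp, tmp_span); // tmp_span assumed
//     // ... work with discovery disabled and the narrowed span ...
//   }  // destructors restore the saved state, in reverse order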

// A utility class to temporarily change the MT'ness of
// reference discovery for the given ReferenceProcessor
// in the scope that contains it.
class ReferenceProcessorMTDiscoveryMutator: StackObj {
 private:
  ReferenceProcessor* _rp;
  bool                _saved_mt;

 public:
  ReferenceProcessorMTDiscoveryMutator(ReferenceProcessor* rp,
                                       bool mt):
    _rp(rp) {
    _saved_mt = _rp->discovery_is_mt();
    _rp->set_mt_discovery(mt);
  }

  ~ReferenceProcessorMTDiscoveryMutator() {
    _rp->set_mt_discovery(_saved_mt);
  }
};


// A utility class to temporarily change the disposition
// of the "is_alive_non_header" closure field of the
// given ReferenceProcessor in the scope that contains it.
class ReferenceProcessorIsAliveMutator: StackObj {
 private:
  ReferenceProcessor* _rp;
  BoolObjectClosure*  _saved_cl;

 public:
  ReferenceProcessorIsAliveMutator(ReferenceProcessor* rp,
                                   BoolObjectClosure*  cl):
    _rp(rp) {
    _saved_cl = _rp->is_alive_non_header();
    _rp->set_is_alive_non_header(cl);
  }

  ~ReferenceProcessorIsAliveMutator() {
    _rp->set_is_alive_non_header(_saved_cl);
  }
};

// A utility class to temporarily change the disposition
// of the "discovery_is_atomic" field of the
// given ReferenceProcessor in the scope that contains it.
class ReferenceProcessorAtomicMutator: StackObj {
 private:
  ReferenceProcessor* _rp;
  bool                _saved_atomic_discovery;

 public:
  ReferenceProcessorAtomicMutator(ReferenceProcessor* rp,
                                  bool atomic):
    _rp(rp) {
    _saved_atomic_discovery = _rp->discovery_is_atomic();
    _rp->set_atomic_discovery(atomic);
  }

  ~ReferenceProcessorAtomicMutator() {
    _rp->set_atomic_discovery(_saved_atomic_discovery);
  }
};


// A utility class to temporarily change the MT processing
// disposition of the given ReferenceProcessor instance
// in the scope that contains it.
class ReferenceProcessorMTProcMutator: StackObj {
 private:
  ReferenceProcessor* _rp;
  bool                _saved_mt;

 public:
  ReferenceProcessorMTProcMutator(ReferenceProcessor* rp,
                                  bool mt):
    _rp(rp) {
    _saved_mt = _rp->processing_is_mt();
    _rp->set_mt_processing(mt);
  }

  ~ReferenceProcessorMTProcMutator() {
    _rp->set_mt_processing(_saved_mt);
  }
};


// This class is an interface used to implement task execution for
// reference processing.
class AbstractRefProcTaskExecutor {
public:

  // Abstract tasks to execute.
  class ProcessTask;
  class EnqueueTask;

  // Executes a task using worker threads.
  virtual void execute(ProcessTask& task) = 0;
  virtual void execute(EnqueueTask& task) = 0;

  // Switch to single-threaded mode.
  virtual void set_single_threaded_mode() { }
};

// Abstract reference processing task to execute.
class AbstractRefProcTaskExecutor::ProcessTask {
protected:
  ProcessTask(ReferenceProcessor& ref_processor,
              DiscoveredList      refs_lists[],
              bool                marks_oops_alive)
    : _ref_processor(ref_processor),
      _refs_lists(refs_lists),
      _marks_oops_alive(marks_oops_alive)
  { }

public:
  virtual void work(unsigned int work_id, BoolObjectClosure& is_alive,
                    OopClosure& keep_alive,
                    VoidClosure& complete_gc) = 0;

  // Returns true if a task marks some oops as alive.
  bool marks_oops_alive() const
  { return _marks_oops_alive; }

protected:
  ReferenceProcessor& _ref_processor;
  DiscoveredList*     _refs_lists;
  const bool          _marks_oops_alive;
};
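
// Sketch of a minimal serial executor (hypothetical, for exposition only;
// real collectors provide MT implementations backed by their worker-thread
// pools, and the closures passed to ProcessTask::work come from the caller):
//
//   class SerialRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
//   public:
//     virtual void execute(ProcessTask& task) {
//       task.work(0 /* work_id */, *_is_alive, *_keep_alive, *_complete_gc);
//     }
//     virtual void execute(EnqueueTask& task) {
//       task.work(0 /* work_id */);
//     }
//     // _is_alive, _keep_alive, _complete_gc are closures assumed to be
//     // supplied when the executor is constructed (omitted here).
//   };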

// Abstract reference enqueueing task to execute.
class AbstractRefProcTaskExecutor::EnqueueTask {
protected:
  EnqueueTask(ReferenceProcessor& ref_processor,
              DiscoveredList      refs_lists[],
              HeapWord*           pending_list_addr,
              int                 n_queues)
    : _ref_processor(ref_processor),
      _refs_lists(refs_lists),
      _pending_list_addr(pending_list_addr),
      _n_queues(n_queues)
  { }

public:
  virtual void work(unsigned int work_id) = 0;

protected:
  ReferenceProcessor& _ref_processor;
  DiscoveredList*     _refs_lists;
  HeapWord*           _pending_list_addr;
  int                 _n_queues;
};

#endif // SHARE_VM_MEMORY_REFERENCEPROCESSOR_HPP