1 /* 2 * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #ifndef SHARE_VM_MEMORY_REFERENCEPROCESSOR_HPP 26 #define SHARE_VM_MEMORY_REFERENCEPROCESSOR_HPP 27 28 #include "memory/referencePolicy.hpp" 29 #include "oops/instanceRefKlass.hpp" 30 31 // ReferenceProcessor class encapsulates the per-"collector" processing 32 // of java.lang.Reference objects for GC. The interface is useful for supporting 33 // a generational abstraction, in particular when there are multiple 34 // generations that are being independently collected -- possibly 35 // concurrently and/or incrementally. Note, however, that the 36 // ReferenceProcessor class abstracts away from a generational setting 37 // by using only a heap interval (called "span" below), thus allowing 38 // its use in a straightforward manner in a general, non-generational 39 // setting. 
//
// The basic idea is that each ReferenceProcessor object concerns
// itself with ("weak") reference processing in a specific "span"
// of the heap of interest to a specific collector. Currently,
// the span is a convex interval of the heap, but, efficiency
// apart, there seems to be no reason it couldn't be extended
// (with appropriate modifications) to any "non-convex interval".

// forward references
class ReferencePolicy;
class AbstractRefProcTaskExecutor;
class DiscoveredList;

class ReferenceProcessor : public CHeapObj {
 protected:
  MemRegion   _span;                 // (right-open) interval of heap
                                     // subject to wkref discovery
  bool        _discovering_refs;     // true when discovery enabled
  bool        _discovery_is_atomic;  // if discovery is atomic wrt
                                     // other collectors in configuration
  bool        _discovery_is_mt;      // true if reference discovery is MT.
  // If true, setting "next" field of a discovered refs list requires
  // write barrier(s).  (Must be true if used in a collector in which
  // elements of a discovered list may be moved during discovery: for
  // example, a collector like Garbage-First that moves objects during a
  // long-term concurrent marking phase that does weak reference
  // discovery.)
  bool        _discovered_list_needs_barrier;
  BarrierSet* _bs;                   // Cached copy of BarrierSet.
  bool        _enqueuing_is_done;    // true if all weak references enqueued
  bool        _processing_is_mt;     // true during phases when
                                     // reference processing is MT.
  int         _next_id;              // round-robin mod _num_q counter in
                                     // support of work distribution

  // For collectors that do not keep GC marking information
  // in the object header, this field holds a closure that
  // helps the reference processor determine the reachability
  // of an oop (the field is currently initialized to NULL for
  // all collectors but the CMS collector).
  BoolObjectClosure* _is_alive_non_header;

  // Soft ref clearing policies
  // . the default policy
  static ReferencePolicy*   _default_soft_ref_policy;
  // . the "clear all" policy
  static ReferencePolicy*   _always_clear_soft_ref_policy;
  // . the current policy below is either one of the above
  ReferencePolicy*          _current_soft_ref_policy;

  // The discovered ref lists themselves

  // The active MT'ness degree of the queues below
  int             _num_q;
  // The maximum MT'ness degree of the queues below
  int             _max_num_q;
  // Arrays of lists of oops, one per thread
  DiscoveredList* _discoveredSoftRefs;
  DiscoveredList* _discoveredWeakRefs;
  DiscoveredList* _discoveredFinalRefs;
  DiscoveredList* _discoveredPhantomRefs;

 public:
  // Accessors for the active/maximum MT'ness degree of the queues.
  int num_q()                              { return _num_q; }
  int max_num_q()                          { return _max_num_q; }
  void set_active_mt_degree(int v)         { _num_q = v; }
  DiscoveredList* discovered_soft_refs()   { return _discoveredSoftRefs; }
  // Select the soft reference clearing policy for this collection cycle:
  // the "clear all" policy if always_clear is true, the default otherwise.
  ReferencePolicy* setup_policy(bool always_clear) {
    _current_soft_ref_policy = always_clear ?
      _always_clear_soft_ref_policy : _default_soft_ref_policy;
    _current_soft_ref_policy->setup();   // snapshot the policy threshold
    return _current_soft_ref_policy;
  }

 public:
  // Process references with a certain reachability level.
  void process_discovered_reflist(DiscoveredList               refs_lists[],
                                  ReferencePolicy*             policy,
                                  bool                         clear_referent,
                                  BoolObjectClosure*           is_alive,
                                  OopClosure*                  keep_alive,
                                  VoidClosure*                 complete_gc,
                                  AbstractRefProcTaskExecutor* task_executor);

  // Process the JNI weak reference handles (no per-list policy is involved).
  void process_phaseJNI(BoolObjectClosure* is_alive,
                        OopClosure*        keep_alive,
                        VoidClosure*       complete_gc);

  // Work methods used by the method process_discovered_reflist
  // Phase1: keep alive all those referents that are otherwise
  // dead but which must be kept alive by policy (and their closure).
  void process_phase1(DiscoveredList&     refs_list,
                      ReferencePolicy*    policy,
                      BoolObjectClosure*  is_alive,
                      OopClosure*         keep_alive,
                      VoidClosure*        complete_gc);
  // Phase2: remove all those references whose referents are
  // reachable.
  inline void process_phase2(DiscoveredList&    refs_list,
                             BoolObjectClosure* is_alive,
                             OopClosure*        keep_alive,
                             VoidClosure*       complete_gc) {
    if (discovery_is_atomic()) {
      // complete_gc is ignored in this case for this phase
      pp2_work(refs_list, is_alive, keep_alive);
    } else {
      assert(complete_gc != NULL, "Error");
      pp2_work_concurrent_discovery(refs_list, is_alive,
                                    keep_alive, complete_gc);
    }
  }
  // Work methods in support of process_phase2
  void pp2_work(DiscoveredList&    refs_list,
                BoolObjectClosure* is_alive,
                OopClosure*        keep_alive);
  void pp2_work_concurrent_discovery(
                DiscoveredList&    refs_list,
                BoolObjectClosure* is_alive,
                OopClosure*        keep_alive,
                VoidClosure*       complete_gc);
  // Phase3: process the referents by either clearing them
  // or keeping them alive (and their closure)
  void process_phase3(DiscoveredList&    refs_list,
                      bool               clear_referent,
                      BoolObjectClosure* is_alive,
                      OopClosure*        keep_alive,
                      VoidClosure*       complete_gc);

  // Enqueue references with a certain reachability level
  void enqueue_discovered_reflist(DiscoveredList& refs_list, HeapWord* pending_list_addr);

  // "Preclean" all the discovered reference lists
  // by removing references with strongly reachable referents.
  // The first argument is a predicate on an oop that indicates
  // its (strong) reachability and the second is a closure that
  // may be used to incrementalize or abort the precleaning process.
  // The caller is responsible for taking care of potential
  // interference with concurrent operations on these lists
  // (or predicates involved) by other threads. Currently
  // only used by the CMS collector.  should_unload_classes is
  // used to aid assertion checking when classes are collected.
  void preclean_discovered_references(BoolObjectClosure* is_alive,
                                      OopClosure*        keep_alive,
                                      VoidClosure*       complete_gc,
                                      YieldClosure*      yield,
                                      bool               should_unload_classes);

  // Delete entries in the discovered lists that have
  // either a null referent or are not active. Such
  // Reference objects can result from the clearing
  // or enqueueing of Reference objects concurrent
  // with their discovery by a (concurrent) collector.
  // For a definition of "active" see java.lang.ref.Reference;
  // Refs are born active, become inactive when enqueued,
  // and never become active again. The state of being
  // active is encoded as follows: A Ref is active
  // if and only if its "next" field is NULL.
  void clean_up_discovered_references();
  void clean_up_discovered_reflist(DiscoveredList& refs_list);

  // Returns the name of the discovered reference list
  // occupying the i / _num_q slot.
  const char* list_name(int i);

  // Move all discovered references onto the pending list.
  void enqueue_discovered_reflists(HeapWord* pending_list_addr, AbstractRefProcTaskExecutor* task_executor);

 protected:
  // "Preclean" the given discovered reference list
  // by removing references with strongly reachable referents.
  // Currently used in support of CMS only.
  void preclean_discovered_reflist(DiscoveredList&    refs_list,
                                   BoolObjectClosure* is_alive,
                                   OopClosure*        keep_alive,
                                   VoidClosure*       complete_gc,
                                   YieldClosure*      yield);

  // round-robin mod _num_q (note: _not_ mod _max_num_q)
  int next_id() {
    int id = _next_id;
    if (++_next_id == _num_q) {
      _next_id = 0;
    }
    return id;
  }
  DiscoveredList* get_discovered_list(ReferenceType rt);
  inline void add_to_discovered_list_mt(DiscoveredList& refs_list, oop obj,
                                        HeapWord* discovered_addr);
  void verify_ok_to_handle_reflists() PRODUCT_RETURN;

  void clear_discovered_references(DiscoveredList& refs_list);
  void abandon_partial_discovered_list(DiscoveredList& refs_list);

  // Calculate the number of jni handles.
  unsigned int count_jni_refs();

  // Balances reference queues.
  void balance_queues(DiscoveredList ref_lists[]);

  // Update (advance) the soft ref master clock field.
  void update_soft_ref_master_clock();

 public:
  // constructor
  ReferenceProcessor():
    _span((HeapWord*)NULL, (HeapWord*)NULL),
    _discoveredSoftRefs(NULL),  _discoveredWeakRefs(NULL),
    _discoveredFinalRefs(NULL), _discoveredPhantomRefs(NULL),
    _discovering_refs(false),
    _discovery_is_atomic(true),
    _enqueuing_is_done(false),
    _discovery_is_mt(false),
    _discovered_list_needs_barrier(false),
    _bs(NULL),
    _is_alive_non_header(NULL),
    _num_q(0),
    _max_num_q(0),
    _processing_is_mt(false),
    _next_id(0)
  { }

  // Default parameters give you a vanilla reference processor.
  ReferenceProcessor(MemRegion span,
                     bool mt_processing = false, int mt_processing_degree = 1,
                     bool mt_discovery  = false, int mt_discovery_degree  = 1,
                     bool atomic_discovery = true,
                     BoolObjectClosure* is_alive_non_header = NULL,
                     bool discovered_list_needs_barrier = false);

  // RefDiscoveryPolicy values
  enum DiscoveryPolicy {
    ReferenceBasedDiscovery = 0,
    ReferentBasedDiscovery = 1,
    DiscoveryPolicyMin = ReferenceBasedDiscovery,
    DiscoveryPolicyMax = ReferentBasedDiscovery
  };

  static void init_statics();

 public:
  // get and set "is_alive_non_header" field
  BoolObjectClosure* is_alive_non_header() {
    return _is_alive_non_header;
  }
  void set_is_alive_non_header(BoolObjectClosure* is_alive_non_header) {
    _is_alive_non_header = is_alive_non_header;
  }

  // get and set span
  MemRegion span()                   { return _span; }
  void      set_span(MemRegion span) { _span = span; }

  // start and stop weak ref discovery
  void enable_discovery()  { _discovering_refs = true;  }
  void disable_discovery() { _discovering_refs = false; }
  bool discovery_enabled() { return _discovering_refs;  }

  // whether discovery is atomic wrt other collectors
  bool discovery_is_atomic() const { return _discovery_is_atomic; }
  void set_atomic_discovery(bool atomic) { _discovery_is_atomic = atomic; }

  // whether discovery is done by multiple threads simultaneously
  bool discovery_is_mt() const { return _discovery_is_mt; }
  void set_mt_discovery(bool mt) { _discovery_is_mt = mt; }

  // Whether we are in a phase when _processing_ is MT.
  bool processing_is_mt() const { return _processing_is_mt; }
  void set_mt_processing(bool mt) { _processing_is_mt = mt; }

  // whether all enqueuing of weak references is complete
  bool enqueuing_is_done()           { return _enqueuing_is_done; }
  void set_enqueuing_is_done(bool v) { _enqueuing_is_done = v; }

  // iterate over oops
  void weak_oops_do(OopClosure* f);       // weak roots

  // Balance each of the discovered lists.
  void balance_all_queues();

  // Discover a Reference object, using appropriate discovery criteria
  bool discover_reference(oop obj, ReferenceType rt);

  // Process references found during GC (called by the garbage collector)
  void process_discovered_references(BoolObjectClosure*           is_alive,
                                     OopClosure*                  keep_alive,
                                     VoidClosure*                 complete_gc,
                                     AbstractRefProcTaskExecutor* task_executor);

 public:
  // Enqueue references at end of GC (called by the garbage collector)
  bool enqueue_discovered_references(AbstractRefProcTaskExecutor* task_executor = NULL);

  // If a discovery is in process that is being superseded, abandon it: all
  // the discovered lists will be empty, and all the objects on them will
  // have NULL discovered fields.  Must be called only at a safepoint.
  void abandon_partial_discovery();

  // debugging
  void verify_no_references_recorded() PRODUCT_RETURN;
  void verify_referent(oop obj) PRODUCT_RETURN;

  // clear the discovered lists (unlinking each entry).
  void clear_discovered_references() PRODUCT_RETURN;
};

// A utility class to disable reference discovery in
// the scope which contains it, for given ReferenceProcessor.
346 class NoRefDiscovery: StackObj { 347 private: 348 ReferenceProcessor* _rp; 349 bool _was_discovering_refs; 350 public: 351 NoRefDiscovery(ReferenceProcessor* rp) : _rp(rp) { 352 _was_discovering_refs = _rp->discovery_enabled(); 353 if (_was_discovering_refs) { 354 _rp->disable_discovery(); 355 } 356 } 357 358 ~NoRefDiscovery() { 359 if (_was_discovering_refs) { 360 _rp->enable_discovery(); 361 } 362 } 363 }; 364 365 366 // A utility class to temporarily mutate the span of the 367 // given ReferenceProcessor in the scope that contains it. 368 class ReferenceProcessorSpanMutator: StackObj { 369 private: 370 ReferenceProcessor* _rp; 371 MemRegion _saved_span; 372 373 public: 374 ReferenceProcessorSpanMutator(ReferenceProcessor* rp, 375 MemRegion span): 376 _rp(rp) { 377 _saved_span = _rp->span(); 378 _rp->set_span(span); 379 } 380 381 ~ReferenceProcessorSpanMutator() { 382 _rp->set_span(_saved_span); 383 } 384 }; 385 386 // A utility class to temporarily change the MT'ness of 387 // reference discovery for the given ReferenceProcessor 388 // in the scope that contains it. 389 class ReferenceProcessorMTDiscoveryMutator: StackObj { 390 private: 391 ReferenceProcessor* _rp; 392 bool _saved_mt; 393 394 public: 395 ReferenceProcessorMTDiscoveryMutator(ReferenceProcessor* rp, 396 bool mt): 397 _rp(rp) { 398 _saved_mt = _rp->discovery_is_mt(); 399 _rp->set_mt_discovery(mt); 400 } 401 402 ~ReferenceProcessorMTDiscoveryMutator() { 403 _rp->set_mt_discovery(_saved_mt); 404 } 405 }; 406 407 408 // A utility class to temporarily change the disposition 409 // of the "is_alive_non_header" closure field of the 410 // given ReferenceProcessor in the scope that contains it. 
411 class ReferenceProcessorIsAliveMutator: StackObj { 412 private: 413 ReferenceProcessor* _rp; 414 BoolObjectClosure* _saved_cl; 415 416 public: 417 ReferenceProcessorIsAliveMutator(ReferenceProcessor* rp, 418 BoolObjectClosure* cl): 419 _rp(rp) { 420 _saved_cl = _rp->is_alive_non_header(); 421 _rp->set_is_alive_non_header(cl); 422 } 423 424 ~ReferenceProcessorIsAliveMutator() { 425 _rp->set_is_alive_non_header(_saved_cl); 426 } 427 }; 428 429 // A utility class to temporarily change the disposition 430 // of the "discovery_is_atomic" field of the 431 // given ReferenceProcessor in the scope that contains it. 432 class ReferenceProcessorAtomicMutator: StackObj { 433 private: 434 ReferenceProcessor* _rp; 435 bool _saved_atomic_discovery; 436 437 public: 438 ReferenceProcessorAtomicMutator(ReferenceProcessor* rp, 439 bool atomic): 440 _rp(rp) { 441 _saved_atomic_discovery = _rp->discovery_is_atomic(); 442 _rp->set_atomic_discovery(atomic); 443 } 444 445 ~ReferenceProcessorAtomicMutator() { 446 _rp->set_atomic_discovery(_saved_atomic_discovery); 447 } 448 }; 449 450 451 // A utility class to temporarily change the MT processing 452 // disposition of the given ReferenceProcessor instance 453 // in the scope that contains it. 454 class ReferenceProcessorMTProcMutator: StackObj { 455 private: 456 ReferenceProcessor* _rp; 457 bool _saved_mt; 458 459 public: 460 ReferenceProcessorMTProcMutator(ReferenceProcessor* rp, 461 bool mt): 462 _rp(rp) { 463 _saved_mt = _rp->processing_is_mt(); 464 _rp->set_mt_processing(mt); 465 } 466 467 ~ReferenceProcessorMTProcMutator() { 468 _rp->set_mt_processing(_saved_mt); 469 } 470 }; 471 472 473 // This class is an interface used to implement task execution for the 474 // reference processing. 475 class AbstractRefProcTaskExecutor { 476 public: 477 478 // Abstract tasks to execute. 479 class ProcessTask; 480 class EnqueueTask; 481 482 // Executes a task using worker threads. 
483 virtual void execute(ProcessTask& task) = 0; 484 virtual void execute(EnqueueTask& task) = 0; 485 486 // Switch to single threaded mode. 487 virtual void set_single_threaded_mode() { }; 488 }; 489 490 // Abstract reference processing task to execute. 491 class AbstractRefProcTaskExecutor::ProcessTask { 492 protected: 493 ProcessTask(ReferenceProcessor& ref_processor, 494 DiscoveredList refs_lists[], 495 bool marks_oops_alive) 496 : _ref_processor(ref_processor), 497 _refs_lists(refs_lists), 498 _marks_oops_alive(marks_oops_alive) 499 { } 500 501 public: 502 virtual void work(unsigned int work_id, BoolObjectClosure& is_alive, 503 OopClosure& keep_alive, 504 VoidClosure& complete_gc) = 0; 505 506 // Returns true if a task marks some oops as alive. 507 bool marks_oops_alive() const 508 { return _marks_oops_alive; } 509 510 protected: 511 ReferenceProcessor& _ref_processor; 512 DiscoveredList* _refs_lists; 513 const bool _marks_oops_alive; 514 }; 515 516 // Abstract reference processing task to execute. 517 class AbstractRefProcTaskExecutor::EnqueueTask { 518 protected: 519 EnqueueTask(ReferenceProcessor& ref_processor, 520 DiscoveredList refs_lists[], 521 HeapWord* pending_list_addr, 522 int n_queues) 523 : _ref_processor(ref_processor), 524 _refs_lists(refs_lists), 525 _pending_list_addr(pending_list_addr), 526 _n_queues(n_queues) 527 { } 528 529 public: 530 virtual void work(unsigned int work_id) = 0; 531 532 protected: 533 ReferenceProcessor& _ref_processor; 534 DiscoveredList* _refs_lists; 535 HeapWord* _pending_list_addr; 536 int _n_queues; 537 }; 538 539 #endif // SHARE_VM_MEMORY_REFERENCEPROCESSOR_HPP