/*
 * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_SHARED_REFERENCEPROCESSOR_HPP
#define SHARE_VM_GC_SHARED_REFERENCEPROCESSOR_HPP

#include "gc/shared/referenceDiscoverer.hpp"
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/referenceProcessorPhaseTimes.hpp"
#include "gc/shared/referenceProcessorStats.hpp"
#include "memory/referenceType.hpp"
#include "oops/instanceRefKlass.hpp"

class GCTimer;

// The ReferenceProcessor class encapsulates the per-"collector" processing
// of java.lang.Reference objects for GC. The interface is useful for supporting
// a generational abstraction, in particular when there are multiple
// generations that are being independently collected -- possibly
// concurrently and/or incrementally.
// The ReferenceProcessor class abstracts away from a generational setting
// by using a closure that determines whether a given reference or referent is
// subject to this ReferenceProcessor's discovery, thus allowing its use in a
// straightforward manner in a general, non-generational, non-contiguous generation
// (or heap) setting.

// forward references
class ReferencePolicy;
class AbstractRefProcTaskExecutor;

// List of discovered references.
class DiscoveredList {
public:
  DiscoveredList() : _len(0), _compressed_head(0), _oop_head(NULL) { }
  inline oop head() const;
  HeapWord* adr_head() {
    return UseCompressedOops ? (HeapWord*)&_compressed_head :
                               (HeapWord*)&_oop_head;
  }
  inline void set_head(oop o);
  inline bool is_empty() const;
  size_t length()               { return _len; }
  void   set_length(size_t len) { _len = len;  }
  void   inc_length(size_t inc) { _len += inc; assert(_len > 0, "Error"); }
  void   dec_length(size_t dec) { _len -= dec; }
private:
  // Set value depending on UseCompressedOops. This could be a template class
  // but then we have to fix all the instantiations and declarations that use this class.
  oop       _oop_head;
  narrowOop _compressed_head;
  size_t    _len;
};
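
// Illustrative sketch (not part of this interface): how a discoverer might
// typically prepend a newly discovered Reference to a DiscoveredList while
// keeping the cached length in sync. link_discovered() is a hypothetical
// stand-in for whatever code writes the Reference's discovered field; only
// head()/set_head()/is_empty()/inc_length() come from this class. The end of
// the list is marked by a self-loop, which is what DiscoveredListIterator
// checks for below.
//
//   void push_discovered(DiscoveredList& list, oop ref) {
//     link_discovered(ref, list.is_empty() ? ref : list.head()); // self-loop terminates the list
//     list.set_head(ref);
//     list.inc_length(1);
//   }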

// Iterator for the list of discovered references.
class DiscoveredListIterator {
private:
  DiscoveredList&    _refs_list;
  HeapWord*          _prev_next;
  oop                _prev;
  oop                _ref;
  HeapWord*          _discovered_addr;
  oop                _next;
  HeapWord*          _referent_addr;
  oop                _referent;
  OopClosure*        _keep_alive;
  BoolObjectClosure* _is_alive;

  DEBUG_ONLY(
  oop                _first_seen; // cyclic linked list check
  )

  NOT_PRODUCT(
  size_t             _processed;
  size_t             _removed;
  )

public:
  inline DiscoveredListIterator(DiscoveredList&    refs_list,
                                OopClosure*        keep_alive,
                                BoolObjectClosure* is_alive);

  // End Of List.
  inline bool has_next() const { return _ref != NULL; }

  // Get oop to the Reference object.
  inline oop obj() const { return _ref; }

  // Get oop to the referent object.
  inline oop referent() const { return _referent; }

  // Returns true if referent is alive.
  inline bool is_referent_alive() const {
    return _is_alive->do_object_b(_referent);
  }

  // Loads data for the current reference.
  // The "allow_null_referent" argument tells us to allow for the possibility
  // of a NULL referent in the discovered Reference object. This typically
  // happens in the case of concurrent collectors that may have done the
  // discovery concurrently, or interleaved, with mutator execution.
  void load_ptrs(DEBUG_ONLY(bool allow_null_referent));

  // Move to the next discovered reference.
  inline void next() {
    _prev_next = _discovered_addr;
    _prev = _ref;
    move_to_next();
  }

  // Remove the current reference from the list
  void remove();

  // Make the referent alive.
  inline void make_referent_alive() {
    if (UseCompressedOops) {
      _keep_alive->do_oop((narrowOop*)_referent_addr);
    } else {
      _keep_alive->do_oop((oop*)_referent_addr);
    }
  }

  // NULL out referent pointer.
  void clear_referent();

  // Statistics
  NOT_PRODUCT(
  inline size_t processed() const { return _processed; }
  inline size_t removed() const   { return _removed; }
  )

  inline void move_to_next() {
    if (_ref == _next) {
      // End of the list.
      _ref = NULL;
    } else {
      _ref = _next;
    }
    assert(_ref != _first_seen, "cyclic ref_list found");
    NOT_PRODUCT(_processed++);
  }
};
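
// Illustrative sketch (assumptions noted): the typical shape of a pass over a
// DiscoveredList using this iterator, similar in spirit to the processing
// phases declared further below. The decision of what to do with each
// reference is a placeholder; load_ptrs(), remove(), make_referent_alive(),
// next()/move_to_next() and the closures are the real interface.
//
//   DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
//   while (iter.has_next()) {
//     iter.load_ptrs(DEBUG_ONLY(false /* allow_null_referent */));
//     if (iter.is_referent_alive()) {
//       // Referent is strongly reachable; drop the Reference from the list
//       // and keep its referent (and cohort) alive.
//       iter.remove();
//       iter.make_referent_alive();
//       iter.move_to_next();
//     } else {
//       iter.next();
//     }
//   }
//   complete_gc->do_void();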

class ReferenceProcessor : public ReferenceDiscoverer {
  size_t total_count(DiscoveredList lists[]) const;

  // The SoftReference master timestamp clock
  static jlong _soft_ref_timestamp_clock;

  BoolObjectClosure* _is_subject_to_discovery; // determines whether a given oop is subject
                                               // to this ReferenceProcessor's discovery
                                               // (and further processing).

  bool _discovering_refs;        // true when discovery enabled
  bool _discovery_is_atomic;     // if discovery is atomic wrt
                                 // other collectors in configuration
  bool _discovery_is_mt;         // true if reference discovery is MT.

  bool _enqueuing_is_done;       // true if all weak references enqueued
  bool _processing_is_mt;        // true during phases when
                                 // reference processing is MT.
  uint _next_id;                 // round-robin mod _num_q counter in
                                 // support of work distribution

  // For collectors that do not keep GC liveness information
  // in the object header, this field holds a closure that
  // helps the reference processor determine the reachability
  // of an oop. It is currently initialized to NULL for all
  // collectors except for CMS and G1.
  BoolObjectClosure* _is_alive_non_header;

  // Soft ref clearing policies
  // . the default policy
  static ReferencePolicy* _default_soft_ref_policy;
  // . the "clear all" policy
  static ReferencePolicy* _always_clear_soft_ref_policy;
  // . the current policy below is either one of the above
  ReferencePolicy*        _current_soft_ref_policy;

  // The discovered ref lists themselves

  // The active MT'ness degree of the queues below
  uint _num_q;
  // The maximum MT'ness degree of the queues below
  uint _max_num_q;

  // Master array of discovered oops
  DiscoveredList* _discovered_refs;

  // Arrays of lists of oops, one per thread (pointers into master array above)
  DiscoveredList* _discoveredSoftRefs;
  DiscoveredList* _discoveredWeakRefs;
  DiscoveredList* _discoveredFinalRefs;
  DiscoveredList* _discoveredPhantomRefs;

public:
  static int number_of_subclasses_of_ref() { return (REF_PHANTOM - REF_OTHER); }

  uint num_q()     { return _num_q; }
  uint max_num_q() { return _max_num_q; }
  void set_active_mt_degree(uint v);

  DiscoveredList* discovered_refs() { return _discovered_refs; }

  ReferencePolicy* setup_policy(bool always_clear) {
    _current_soft_ref_policy = always_clear ?
                               _always_clear_soft_ref_policy : _default_soft_ref_policy;
    _current_soft_ref_policy->setup();   // snapshot the policy threshold
    return _current_soft_ref_policy;
  }

  // Process references with a certain reachability level.
  void process_discovered_reflist(DiscoveredList                refs_lists[],
                                  ReferencePolicy*              policy,
                                  bool                          clear_referent,
                                  BoolObjectClosure*            is_alive,
                                  OopClosure*                   keep_alive,
                                  VoidClosure*                  complete_gc,
                                  AbstractRefProcTaskExecutor*  task_executor,
                                  ReferenceProcessorPhaseTimes* phase_times);

  // Work methods used by the method process_discovered_reflist
  // Phase1: keep alive all those referents that are otherwise
  // dead but which must be kept alive by policy (and their transitive closure).
  void process_phase1(DiscoveredList&    refs_list,
                      ReferencePolicy*   policy,
                      BoolObjectClosure* is_alive,
                      OopClosure*        keep_alive,
                      VoidClosure*       complete_gc);
  // Phase2: remove all those references whose referents are
  // reachable.
  void process_phase2(DiscoveredList&    refs_list,
                      BoolObjectClosure* is_alive,
                      OopClosure*        keep_alive,
                      VoidClosure*       complete_gc);
  // Work methods in support of process_phase2
  void pp2_work(DiscoveredList&    refs_list,
                BoolObjectClosure* is_alive,
                OopClosure*        keep_alive);
  void pp2_work_concurrent_discovery(DiscoveredList&    refs_list,
                                     BoolObjectClosure* is_alive,
                                     OopClosure*        keep_alive,
                                     VoidClosure*       complete_gc);
  // Phase3: process the referents by either clearing them
  // or keeping them alive (and their transitive closure).
  void process_phase3(DiscoveredList&    refs_list,
                      bool               clear_referent,
                      BoolObjectClosure* is_alive,
                      OopClosure*        keep_alive,
                      VoidClosure*       complete_gc);

  // Enqueue references with a certain reachability level.
  void enqueue_discovered_reflist(DiscoveredList& refs_list);
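
  // Illustrative sketch (assumption about control flow, simplified and serial):
  // for each reference kind, process_discovered_reflist() conceptually runs the
  // three phases above over every per-thread list. The loop below is not the
  // actual implementation; it only shows how the declarations fit together.
  //
  //   for (uint i = 0; i < _max_num_q; i++) {
  //     process_phase1(refs_lists[i], policy, is_alive, keep_alive, complete_gc); // policy-based keep-alive (SoftRefs only)
  //     process_phase2(refs_lists[i], is_alive, keep_alive, complete_gc);         // drop refs whose referents are reachable
  //     process_phase3(refs_lists[i], clear_referent, is_alive, keep_alive, complete_gc); // clear or keep remaining referents
  //   }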

  // "Preclean" all the discovered reference lists
  // by removing references with strongly reachable referents.
  // The is_alive argument is a predicate on an oop that indicates
  // its (strong) reachability, and the yield closure
  // may be used to incrementalize or abort the precleaning process.
  // The caller is responsible for taking care of potential
  // interference with concurrent operations on these lists
  // (or predicates involved) by other threads. Currently
  // only used by the CMS collector.
  void preclean_discovered_references(BoolObjectClosure* is_alive,
                                      OopClosure*        keep_alive,
                                      VoidClosure*       complete_gc,
                                      YieldClosure*      yield,
                                      GCTimer*           gc_timer);

  // Returns the name of the discovered reference list
  // occupying the i / _num_q slot.
  const char* list_name(uint i);

  void enqueue_discovered_reflists(AbstractRefProcTaskExecutor*  task_executor,
                                   ReferenceProcessorPhaseTimes* phase_times);

  // "Preclean" the given discovered reference list
  // by removing references with strongly reachable referents.
  // Currently used in support of CMS only.
  void preclean_discovered_reflist(DiscoveredList&    refs_list,
                                   BoolObjectClosure* is_alive,
                                   OopClosure*        keep_alive,
                                   VoidClosure*       complete_gc,
                                   YieldClosure*      yield);
private:
  // round-robin mod _num_q (note: _not_ mod _max_num_q)
  uint next_id() {
    uint id = _next_id;
    assert(!_discovery_is_mt, "Round robin should only be used in serial discovery");
    if (++_next_id == _num_q) {
      _next_id = 0;
    }
    assert(_next_id < _num_q, "_next_id %u _num_q %u _max_num_q %u", _next_id, _num_q, _max_num_q);
    return id;
  }
  DiscoveredList* get_discovered_list(ReferenceType rt);
  inline void add_to_discovered_list_mt(DiscoveredList& refs_list, oop obj,
                                        HeapWord* discovered_addr);

  void clear_discovered_references(DiscoveredList& refs_list);

  void log_reflist_counts(DiscoveredList ref_lists[], uint active_length, size_t total_count) PRODUCT_RETURN;

  // Balances reference queues.
  void balance_queues(DiscoveredList ref_lists[]);

  // Update (advance) the soft ref master clock field.
  void update_soft_ref_master_clock();

  template <class T>
  bool is_subject_to_discovery(T const obj) const;
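
  // Illustrative sketch (assumption, not the actual implementation): during
  // single-threaded discovery, get_discovered_list() can spread references of
  // a given type across the _num_q per-thread lists by indexing with
  // next_id(), e.g. for weak references something like:
  //
  //   DiscoveredList* list = &_discoveredWeakRefs[next_id()];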

public:
  // Default parameters give you a vanilla reference processor.
  ReferenceProcessor(BoolObjectClosure* is_subject_to_discovery,
                     bool mt_processing = false, uint mt_processing_degree = 1,
                     bool mt_discovery  = false, uint mt_discovery_degree  = 1,
                     bool atomic_discovery = true,
                     BoolObjectClosure* is_alive_non_header = NULL);

  // RefDiscoveryPolicy values
  enum DiscoveryPolicy {
    ReferenceBasedDiscovery = 0,
    ReferentBasedDiscovery  = 1,
    DiscoveryPolicyMin      = ReferenceBasedDiscovery,
    DiscoveryPolicyMax      = ReferentBasedDiscovery
  };

  static void init_statics();

public:
  // get and set "is_alive_non_header" field
  BoolObjectClosure* is_alive_non_header() {
    return _is_alive_non_header;
  }
  void set_is_alive_non_header(BoolObjectClosure* is_alive_non_header) {
    _is_alive_non_header = is_alive_non_header;
  }

  BoolObjectClosure* is_subject_to_discovery_closure() const { return _is_subject_to_discovery; }
  void set_is_subject_to_discovery_closure(BoolObjectClosure* cl) { _is_subject_to_discovery = cl; }

  // start and stop weak ref discovery
  void enable_discovery(bool check_no_refs = true);
  void disable_discovery()  { _discovering_refs = false; }
  bool discovery_enabled()  { return _discovering_refs;  }

  // whether discovery is atomic wrt other collectors
  bool discovery_is_atomic() const { return _discovery_is_atomic; }
  void set_atomic_discovery(bool atomic) { _discovery_is_atomic = atomic; }

  // whether discovery is done by multiple threads simultaneously
  bool discovery_is_mt() const { return _discovery_is_mt; }
  void set_mt_discovery(bool mt) { _discovery_is_mt = mt; }

  // Whether we are in a phase when _processing_ is MT.
  bool processing_is_mt() const { return _processing_is_mt; }
  void set_mt_processing(bool mt) { _processing_is_mt = mt; }

  // whether all enqueueing of weak references is complete
  bool enqueuing_is_done()           { return _enqueuing_is_done; }
  void set_enqueuing_is_done(bool v) { _enqueuing_is_done = v; }

  // iterate over oops
  void weak_oops_do(OopClosure* f); // weak roots

  // Balance each of the discovered lists.
  void balance_all_queues();
  void verify_list(DiscoveredList& ref_list);

  // Discover a Reference object, using appropriate discovery criteria
  virtual bool discover_reference(oop obj, ReferenceType rt);

  // Has discovered references that need handling
  bool has_discovered_references();

  // Process references found during GC (called by the garbage collector)
  ReferenceProcessorStats
  process_discovered_references(BoolObjectClosure*            is_alive,
                                OopClosure*                   keep_alive,
                                VoidClosure*                  complete_gc,
                                AbstractRefProcTaskExecutor*  task_executor,
                                ReferenceProcessorPhaseTimes* phase_times);

  // Enqueue references at end of GC (called by the garbage collector)
  void enqueue_discovered_references(AbstractRefProcTaskExecutor*  task_executor,
                                     ReferenceProcessorPhaseTimes* phase_times);
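
  // Illustrative sketch (assumption about a caller, not part of this
  // interface): a collector would typically drive the reference-processing
  // step of a collection roughly as commented below. The closure objects and
  // the meaning of a NULL task_executor (serial processing) are
  // collector-specific assumptions; only the ReferenceProcessor calls are
  // taken from this header.
  //
  //   rp->setup_policy(clear_all_soft_refs);
  //   ReferenceProcessorStats stats =
  //     rp->process_discovered_references(&is_alive, &keep_alive, &complete_gc,
  //                                       NULL /* serial */, &phase_times);
  //   rp->enqueue_discovered_references(NULL /* serial */, &phase_times);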

  // If a discovery is in progress that is being superseded, abandon it: all
  // the discovered lists will be empty, and all the objects on them will
  // have NULL discovered fields. Must be called only at a safepoint.
  void abandon_partial_discovery();

  size_t total_reference_count(ReferenceType rt) const;

  // debugging
  void verify_no_references_recorded() PRODUCT_RETURN;
  void verify_referent(oop obj)        PRODUCT_RETURN;
};

// A reference processor that uses a single memory span to determine the area that
// is subject to discovery. Useful for collectors which have contiguous generations.
class SpanReferenceProcessor : public ReferenceProcessor {
  class SpanBasedDiscoverer : public BoolObjectClosure {
  public:
    MemRegion _span;

    SpanBasedDiscoverer(MemRegion span) : BoolObjectClosure(), _span(span) { }

    virtual bool do_object_b(oop obj) {
      return _span.contains(obj);
    }
  };

  SpanBasedDiscoverer _span_based_discoverer;
public:
  SpanReferenceProcessor(MemRegion span,
                         bool mt_processing = false, uint mt_processing_degree = 1,
                         bool mt_discovery  = false, uint mt_discovery_degree  = 1,
                         bool atomic_discovery = true,
                         BoolObjectClosure* is_alive_non_header = NULL);

  // get and set span
  MemRegion span()                   { return _span_based_discoverer._span; }
  void      set_span(MemRegion span) { _span_based_discoverer._span = span; }
};

// A utility class to disable reference discovery in
// the scope which contains it, for the given ReferenceProcessor.
class NoRefDiscovery: StackObj {
private:
  ReferenceProcessor* _rp;
  bool                _was_discovering_refs;
public:
  NoRefDiscovery(ReferenceProcessor* rp) : _rp(rp) {
    _was_discovering_refs = _rp->discovery_enabled();
    if (_was_discovering_refs) {
      _rp->disable_discovery();
    }
  }

  ~NoRefDiscovery() {
    if (_was_discovering_refs) {
      _rp->enable_discovery(false /*check_no_refs*/);
    }
  }
};

// A utility class to temporarily mutate the subject discovery closure of the
// given ReferenceProcessor in the scope that contains it.
class ReferenceProcessorSubjectToDiscoveryMutator : StackObj {
private:
  ReferenceProcessor* _rp;
  BoolObjectClosure*  _saved_cl;

public:
  ReferenceProcessorSubjectToDiscoveryMutator(ReferenceProcessor* rp, BoolObjectClosure* cl):
    _rp(rp) {
    _saved_cl = _rp->is_subject_to_discovery_closure();
    _rp->set_is_subject_to_discovery_closure(cl);
  }

  ~ReferenceProcessorSubjectToDiscoveryMutator() {
    _rp->set_is_subject_to_discovery_closure(_saved_cl);
  }
};

// A utility class to temporarily mutate the span of the
// given ReferenceProcessor in the scope that contains it.
class ReferenceProcessorSpanMutator: StackObj {
private:
  SpanReferenceProcessor* _rp;
  MemRegion               _saved_span;

public:
  ReferenceProcessorSpanMutator(SpanReferenceProcessor* rp,
                                MemRegion span):
    _rp(rp) {
    _saved_span = _rp->span();
    _rp->set_span(span);
  }

  ~ReferenceProcessorSpanMutator() {
    _rp->set_span(_saved_span);
  }
};
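
// Illustrative sketch (assumption about a caller): the StackObj utilities
// above and below are meant to be used as scope guards. For example, a
// collector phase that must not discover new references might look like:
//
//   {
//     NoRefDiscovery no_discovery(rp);                        // discovery off for this scope
//     ReferenceProcessorIsAliveMutator alive_mut(rp, &is_alive); // temporary is_alive closure
//     ...                                                     // do the work
//   }                                                         // destructors restore the previous state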

// A utility class to temporarily change the MT'ness of
// reference discovery for the given ReferenceProcessor
// in the scope that contains it.
class ReferenceProcessorMTDiscoveryMutator: StackObj {
private:
  ReferenceProcessor* _rp;
  bool                _saved_mt;

public:
  ReferenceProcessorMTDiscoveryMutator(ReferenceProcessor* rp,
                                       bool mt):
    _rp(rp) {
    _saved_mt = _rp->discovery_is_mt();
    _rp->set_mt_discovery(mt);
  }

  ~ReferenceProcessorMTDiscoveryMutator() {
    _rp->set_mt_discovery(_saved_mt);
  }
};

// A utility class to temporarily change the disposition
// of the "is_alive_non_header" closure field of the
// given ReferenceProcessor in the scope that contains it.
class ReferenceProcessorIsAliveMutator: StackObj {
private:
  ReferenceProcessor* _rp;
  BoolObjectClosure*  _saved_cl;

public:
  ReferenceProcessorIsAliveMutator(ReferenceProcessor* rp,
                                   BoolObjectClosure*  cl):
    _rp(rp) {
    _saved_cl = _rp->is_alive_non_header();
    _rp->set_is_alive_non_header(cl);
  }

  ~ReferenceProcessorIsAliveMutator() {
    _rp->set_is_alive_non_header(_saved_cl);
  }
};

// A utility class to temporarily change the disposition
// of the "discovery_is_atomic" field of the
// given ReferenceProcessor in the scope that contains it.
class ReferenceProcessorAtomicMutator: StackObj {
private:
  ReferenceProcessor* _rp;
  bool                _saved_atomic_discovery;

public:
  ReferenceProcessorAtomicMutator(ReferenceProcessor* rp,
                                  bool atomic):
    _rp(rp) {
    _saved_atomic_discovery = _rp->discovery_is_atomic();
    _rp->set_atomic_discovery(atomic);
  }

  ~ReferenceProcessorAtomicMutator() {
    _rp->set_atomic_discovery(_saved_atomic_discovery);
  }
};


// A utility class to temporarily change the MT processing
// disposition of the given ReferenceProcessor instance
// in the scope that contains it.
class ReferenceProcessorMTProcMutator: StackObj {
private:
  ReferenceProcessor* _rp;
  bool                _saved_mt;

public:
  ReferenceProcessorMTProcMutator(ReferenceProcessor* rp,
                                  bool mt):
    _rp(rp) {
    _saved_mt = _rp->processing_is_mt();
    _rp->set_mt_processing(mt);
  }

  ~ReferenceProcessorMTProcMutator() {
    _rp->set_mt_processing(_saved_mt);
  }
};


// This class is an interface used to implement task execution for
// reference processing.
class AbstractRefProcTaskExecutor {
public:

  // Abstract tasks to execute.
  class ProcessTask;
  class EnqueueTask;

  // Executes a task using worker threads.
  virtual void execute(ProcessTask& task) = 0;
  virtual void execute(EnqueueTask& task) = 0;

  // Switch to single threaded mode.
  virtual void set_single_threaded_mode() { };
};
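
// Illustrative sketch (assumption, simplified): a collector-specific executor
// implements execute() by handing the task to its worker threads, each of
// which calls task.work() with its own worker id and per-worker closures.
// MyRefProcTaskExecutor, _num_workers, _closures and WorkerClosures are
// hypothetical placeholders; only ProcessTask::work() comes from this header.
//
//   void MyRefProcTaskExecutor::execute(ProcessTask& task) {
//     for (uint worker_id = 0; worker_id < _num_workers; worker_id++) { // in reality, run in parallel
//       WorkerClosures& c = _closures[worker_id];
//       task.work(worker_id, *c.is_alive, *c.keep_alive, *c.complete_gc);
//     }
//   }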

// Abstract reference processing task to execute.
class AbstractRefProcTaskExecutor::ProcessTask {
protected:
  ProcessTask(ReferenceProcessor&           ref_processor,
              DiscoveredList                refs_lists[],
              bool                          marks_oops_alive,
              ReferenceProcessorPhaseTimes* phase_times)
    : _ref_processor(ref_processor),
      _refs_lists(refs_lists),
      _phase_times(phase_times),
      _marks_oops_alive(marks_oops_alive)
  { }

public:
  virtual void work(unsigned int work_id, BoolObjectClosure& is_alive,
                    OopClosure& keep_alive,
                    VoidClosure& complete_gc) = 0;

  // Returns true if a task marks some oops as alive.
  bool marks_oops_alive() const
  { return _marks_oops_alive; }

protected:
  ReferenceProcessor&           _ref_processor;
  DiscoveredList*               _refs_lists;
  ReferenceProcessorPhaseTimes* _phase_times;
  const bool                    _marks_oops_alive;
};

// Abstract reference enqueueing task to execute.
class AbstractRefProcTaskExecutor::EnqueueTask {
protected:
  EnqueueTask(ReferenceProcessor&           ref_processor,
              DiscoveredList                refs_lists[],
              int                           n_queues,
              ReferenceProcessorPhaseTimes* phase_times)
    : _ref_processor(ref_processor),
      _refs_lists(refs_lists),
      _n_queues(n_queues),
      _phase_times(phase_times)
  { }

public:
  virtual void work(unsigned int work_id) = 0;

protected:
  ReferenceProcessor&           _ref_processor;
  DiscoveredList*               _refs_lists;
  ReferenceProcessorPhaseTimes* _phase_times;
  int                           _n_queues;
};

#endif // SHARE_VM_GC_SHARED_REFERENCEPROCESSOR_HPP