
src/hotspot/share/gc/shared/referenceProcessor.hpp

rev 49831 : imported patch 8201492-properly-implement-non-contiguous-reference-processing
rev 49834 : [mq]: 8202021-cleanup-referenceprocessor

*** 1,7 ****
  /*
!  * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   *
   * This code is free software; you can redistribute it and/or modify it
   * under the terms of the GNU General Public License version 2 only, as
   * published by the Free Software Foundation.
--- 1,7 ----
  /*
!  * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   *
   * This code is free software; you can redistribute it and/or modify it
   * under the terms of the GNU General Public License version 2 only, as
   * published by the Free Software Foundation.
*** 74,88 ****
  // Iterator for the list of discovered references.
  class DiscoveredListIterator {
  private:
    DiscoveredList&    _refs_list;
!   HeapWord*          _prev_next;
!   oop                _prev;
    oop                _ref;
    HeapWord*          _discovered_addr;
!   oop                _next;
    HeapWord*          _referent_addr;
    oop                _referent;

    OopClosure*        _keep_alive;
    BoolObjectClosure* _is_alive;
--- 74,88 ----
  // Iterator for the list of discovered references.
  class DiscoveredListIterator {
  private:
    DiscoveredList&    _refs_list;
!   HeapWord*          _prev_discovered_addr;
!   oop                _prev_discovered;
    oop                _ref;
    HeapWord*          _discovered_addr;
!   oop                _next_discovered;
    HeapWord*          _referent_addr;
    oop                _referent;

    OopClosure*        _keep_alive;
    BoolObjectClosure* _is_alive;
*** 121,132 ****
    // discovery concurrently, or interleaved, with mutator execution.
    void load_ptrs(DEBUG_ONLY(bool allow_null_referent));

    // Move to the next discovered reference.
    inline void next() {
!     _prev_next = _discovered_addr;
!     _prev = _ref;
      move_to_next();
    }

    // Remove the current reference from the list
    void remove();
--- 121,132 ----
    // discovery concurrently, or interleaved, with mutator execution.
    void load_ptrs(DEBUG_ONLY(bool allow_null_referent));

    // Move to the next discovered reference.
    inline void next() {
!     _prev_discovered_addr = _discovered_addr;
!     _prev_discovered = _ref;
      move_to_next();
    }

    // Remove the current reference from the list
    void remove();
*** 148,162 ****
    inline size_t processed() const { return _processed; }
    inline size_t removed() const { return _removed; }
    )

    inline void move_to_next() {
!     if (_ref == _next) {
        // End of the list.
        _ref = NULL;
      } else {
!       _ref = _next;
      }
      assert(_ref != _first_seen, "cyclic ref_list found");
      NOT_PRODUCT(_processed++);
    }
  };
--- 148,162 ----
    inline size_t processed() const { return _processed; }
    inline size_t removed() const { return _removed; }
    )

    inline void move_to_next() {
!     if (_ref == _next_discovered) {
        // End of the list.
        _ref = NULL;
      } else {
!       _ref = _next_discovered;
      }
      assert(_ref != _first_seen, "cyclic ref_list found");
      NOT_PRODUCT(_processed++);
    }
  };
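The end-of-list check in move_to_next() above reflects the convention that a
discovered list is terminated by a self-link: the last element's next pointer
refers to the element itself (NULL is reserved for "not on any list"), so the
walk stops when the next pointer equals the current element. Below is a
minimal standalone sketch of that traversal convention, using plain C++ types
rather than the HotSpot oop/DiscoveredList machinery (illustrative only):

#include <cassert>
#include <cstddef>

// Standalone model: a singly linked list whose last node points to itself
// instead of to NULL, mirroring the "_ref == _next_discovered" check above.
struct Node {
  Node* next;    // next node on the list, or 'this' at the end
  int   payload;
};

// Walk the list the same way move_to_next() does: a self-link means the end.
static size_t count_nodes(Node* head) {
  size_t n = 0;
  for (Node* cur = head; cur != NULL; ) {
    n++;
    Node* next = cur->next;
    cur = (next == cur) ? NULL : next;   // self-link terminates the walk
  }
  return n;
}

int main() {
  Node c = { NULL, 3 }; c.next = &c;     // tail points back at itself
  Node b = { &c,   2 };
  Node a = { &b,   1 };
  assert(count_nodes(&a) == 3);
  assert(count_nodes(NULL) == 0);
  return 0;
}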
*** 177,187 ****
    bool        _discovery_is_mt;         // true if reference discovery is MT.

    bool        _enqueuing_is_done;       // true if all weak references enqueued
    bool        _processing_is_mt;        // true during phases when
                                          // reference processing is MT.
!   uint        _next_id;                 // round-robin mod _num_q counter in
                                          // support of work distribution

    // For collectors that do not keep GC liveness information
    // in the object header, this field holds a closure that
    // helps the reference processor determine the reachability
--- 177,187 ----
    bool        _discovery_is_mt;         // true if reference discovery is MT.

    bool        _enqueuing_is_done;       // true if all weak references enqueued
    bool        _processing_is_mt;        // true during phases when
                                          // reference processing is MT.
!   uint        _next_id;                 // round-robin mod _num_queues counter in
                                          // support of work distribution

    // For collectors that do not keep GC liveness information
    // in the object header, this field holds a closure that
    // helps the reference processor determine the reachability
*** 198,210 ****
    ReferencePolicy* _current_soft_ref_policy;

    // The discovered ref lists themselves

    // The active MT'ness degree of the queues below
!   uint            _num_q;
    // The maximum MT'ness degree of the queues below
!   uint            _max_num_q;

    // Master array of discovered oops
    DiscoveredList* _discovered_refs;

    // Arrays of lists of oops, one per thread (pointers into master array above)
--- 198,210 ----
    ReferencePolicy* _current_soft_ref_policy;

    // The discovered ref lists themselves

    // The active MT'ness degree of the queues below
!   uint            _num_queues;
    // The maximum MT'ness degree of the queues below
!   uint            _max_num_queues;

    // Master array of discovered oops
    DiscoveredList* _discovered_refs;

    // Arrays of lists of oops, one per thread (pointers into master array above)
*** 214,225 ****
    DiscoveredList* _discoveredPhantomRefs;

   public:
    static int number_of_subclasses_of_ref() { return (REF_PHANTOM - REF_OTHER); }

!   uint num_q() { return _num_q; }
!   uint max_num_q() { return _max_num_q; }
    void set_active_mt_degree(uint v);

    DiscoveredList* discovered_refs() { return _discovered_refs; }

    ReferencePolicy* setup_policy(bool always_clear) {
--- 214,225 ----
    DiscoveredList* _discoveredPhantomRefs;

   public:
    static int number_of_subclasses_of_ref() { return (REF_PHANTOM - REF_OTHER); }

!   uint num_queues() const { return _num_queues; }
!   uint max_num_queues() const { return _max_num_queues; }
    void set_active_mt_degree(uint v);

    DiscoveredList* discovered_refs() { return _discovered_refs; }

    ReferencePolicy* setup_policy(bool always_clear) {
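The accessors above, together with the "master array" comments in the previous
hunk, point at a layout where _discovered_refs backs one list per (reference
subclass, queue) pair and per-subclass pointers such as _discoveredPhantomRefs
are offsets into that array. A rough standalone sketch of that indexing idea
follows, with hypothetical names and sizes (not the actual HotSpot allocation
code):

#include <cassert>
#include <cstdio>

// Hypothetical sizes for the sketch; the real values come from
// number_of_subclasses_of_ref() and the maximum MT'ness degree.
static const unsigned kNumSubclasses = 4;   // Soft, Weak, Final, Phantom
static const unsigned kMaxNumQueues  = 8;

struct ToyList { unsigned id; };

// One flat "master array", as the _discovered_refs comment describes.
static ToyList master[kNumSubclasses * kMaxNumQueues];

// Queue q of reference subclass s occupies a fixed slot in the master array.
static ToyList* list_for(unsigned s, unsigned q) {
  assert(s < kNumSubclasses && q < kMaxNumQueues);
  return &master[s * kMaxNumQueues + q];
}

// Dividing a flat index by the per-subclass stride recovers the owning
// subclass; the list_name() comment further below expresses the same idea
// in terms of _num_queues.
static unsigned subclass_of(unsigned flat_index) {
  return flat_index / kMaxNumQueues;
}

int main() {
  unsigned flat = (unsigned)(list_for(2, 5) - master);   // subclass 2, queue 5
  std::printf("flat index %u -> subclass %u\n", flat, subclass_of(flat));
  assert(subclass_of(flat) == 2);
  return 0;
}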
*** 261,271 ****
                        DiscoveredList&    refs_list,
                        BoolObjectClosure* is_alive,
                        OopClosure*        keep_alive,
                        VoidClosure*       complete_gc);
    // Phase3: process the referents by either clearing them
!   // or keeping them alive (and their closure)
    void process_phase3(DiscoveredList&    refs_list,
                        bool               clear_referent,
                        BoolObjectClosure* is_alive,
                        OopClosure*        keep_alive,
                        VoidClosure*       complete_gc);
--- 261,271 ----
                        DiscoveredList&    refs_list,
                        BoolObjectClosure* is_alive,
                        OopClosure*        keep_alive,
                        VoidClosure*       complete_gc);
    // Phase3: process the referents by either clearing them
!   // or keeping them alive (and their closure), and enqueuing them.
    void process_phase3(DiscoveredList&    refs_list,
                        bool               clear_referent,
                        BoolObjectClosure* is_alive,
                        OopClosure*        keep_alive,
                        VoidClosure*       complete_gc);
*** 287,297 ****
                                          VoidClosure*       complete_gc,
                                          YieldClosure*      yield,
                                          GCTimer*           gc_timer);

    // Returns the name of the discovered reference list
!   // occupying the i / _num_q slot.
    const char* list_name(uint i);

    void enqueue_discovered_reflists(AbstractRefProcTaskExecutor* task_executor,
                                     ReferenceProcessorPhaseTimes* phase_times);
--- 287,297 ----
                                          VoidClosure*       complete_gc,
                                          YieldClosure*      yield,
                                          GCTimer*           gc_timer);

    // Returns the name of the discovered reference list
!   // occupying the i / _num_queues slot.
    const char* list_name(uint i);

    void enqueue_discovered_reflists(AbstractRefProcTaskExecutor* task_executor,
                                     ReferenceProcessorPhaseTimes* phase_times);
*** 302,319 ****
                                BoolObjectClosure* is_alive,
                                OopClosure*        keep_alive,
                                VoidClosure*       complete_gc,
                                YieldClosure*      yield);
   private:
!   // round-robin mod _num_q (not: _not_ mode _max_num_q)
    uint next_id() {
      uint id = _next_id;
      assert(!_discovery_is_mt, "Round robin should only be used in serial discovery");
!     if (++_next_id == _num_q) {
        _next_id = 0;
      }
!     assert(_next_id < _num_q, "_next_id %u _num_q %u _max_num_q %u", _next_id, _num_q, _max_num_q);
      return id;
    }
    DiscoveredList* get_discovered_list(ReferenceType rt);
    inline void add_to_discovered_list_mt(DiscoveredList& refs_list, oop obj,
                                          HeapWord* discovered_addr);
--- 302,319 ----
                                BoolObjectClosure* is_alive,
                                OopClosure*        keep_alive,
                                VoidClosure*       complete_gc,
                                YieldClosure*      yield);
   private:
!   // round-robin mod _num_queues (not: _not_ mod _max_num_queues)
    uint next_id() {
      uint id = _next_id;
      assert(!_discovery_is_mt, "Round robin should only be used in serial discovery");
!     if (++_next_id == _num_queues) {
        _next_id = 0;
      }
!     assert(_next_id < _num_queues, "_next_id %u _num_queues %u _max_num_queues %u", _next_id, _num_queues, _max_num_queues);
      return id;
    }
    DiscoveredList* get_discovered_list(ReferenceType rt);
    inline void add_to_discovered_list_mt(DiscoveredList& refs_list, oop obj,
                                          HeapWord* discovered_addr);
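For readers skimming the rename, next_id() is simply a wrapping counter: in
serial (single-threaded) discovery, successive calls hand out queue ids
0, 1, ..., _num_queues - 1 and then wrap, so discovered references are still
spread across all active lists. A tiny standalone demonstration of that
pattern (a sketch, not HotSpot code):

#include <cassert>

// Minimal model of the round-robin id assignment shown above.
struct RoundRobin {
  unsigned num_queues;   // active degree, i.e. _num_queues
  unsigned next;         // i.e. _next_id

  unsigned next_id() {
    unsigned id = next;
    if (++next == num_queues) {
      next = 0;          // wrap; ids never reach num_queues
    }
    return id;
  }
};

int main() {
  RoundRobin rr = { 3, 0 };
  unsigned seen[6];
  for (int i = 0; i < 6; i++) {
    seen[i] = rr.next_id();
  }
  // Two full cycles over queues 0..2.
  assert(seen[0] == 0 && seen[1] == 1 && seen[2] == 2);
  assert(seen[3] == 0 && seen[4] == 1 && seen[5] == 2);
  return 0;
}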