
src/hotspot/share/gc/g1/g1DirtyCardQueue.hpp

rev 53581 : [mq]: move_files
rev 53582 : imported patch rename

*** 20,87 ****
   * or visit www.oracle.com if you need additional information or have any
   * questions.
   *
   */
  
! #ifndef SHARE_GC_G1_DIRTYCARDQUEUE_HPP
! #define SHARE_GC_G1_DIRTYCARDQUEUE_HPP
  
  #include "gc/shared/ptrQueue.hpp"
  #include "memory/allocation.hpp"
  
! class DirtyCardQueueSet;
  class G1FreeIdSet;
  class JavaThread;
  class Monitor;
  
  // A closure class for processing card table entries.  Note that we don't
  // require these closure objects to be stack-allocated.
! class CardTableEntryClosure: public CHeapObj<mtGC> {
  public:
    // Process the card whose card table entry is "card_ptr".  If returns
    // "false", terminate the iteration early.
    virtual bool do_card_ptr(jbyte* card_ptr, uint worker_i) = 0;
  };
  
  // A ptrQueue whose elements are "oops", pointers to object heads.
! class DirtyCardQueue: public PtrQueue {
  public:
!   DirtyCardQueue(DirtyCardQueueSet* qset, bool permanent = false);
  
    // Flush before destroying; queue may be used to capture pending work while
    // doing something else, with auto-flush on completion.
!   ~DirtyCardQueue();
  
    // Process queue entries and release resources.
    void flush() { flush_impl(); }
  
    // Compiler support.
    static ByteSize byte_offset_of_index() {
!     return PtrQueue::byte_offset_of_index<DirtyCardQueue>();
    }
    using PtrQueue::byte_width_of_index;
  
    static ByteSize byte_offset_of_buf() {
!     return PtrQueue::byte_offset_of_buf<DirtyCardQueue>();
    }
    using PtrQueue::byte_width_of_buf;
  
  };
  
! class DirtyCardQueueSet: public PtrQueueSet {
!   DirtyCardQueue _shared_dirty_card_queue;
  
    // Apply the closure to the elements of "node" from it's index to
    // buffer_size.  If all closure applications return true, then
    // returns true.  Stops processing after the first closure
    // application that returns false, and returns false from this
    // function.  If "consume" is true, the node's index is updated to
    // exclude the processed elements, e.g. up to the element for which
    // the closure returned false.
!   bool apply_closure_to_buffer(CardTableEntryClosure* cl,
                                 BufferNode* node,
                                 bool consume,
                                 uint worker_i = 0);
  
    // If there are more than stop_at completed buffers, pop one, apply
--- 20,87 ----
   * or visit www.oracle.com if you need additional information or have any
   * questions.
   *
   */
  
! #ifndef SHARE_GC_G1_G1DIRTYCARDQUEUE_HPP
! #define SHARE_GC_G1_G1DIRTYCARDQUEUE_HPP
  
  #include "gc/shared/ptrQueue.hpp"
  #include "memory/allocation.hpp"
  
! class G1DirtyCardQueueSet;
  class G1FreeIdSet;
  class JavaThread;
  class Monitor;
  
  // A closure class for processing card table entries.  Note that we don't
  // require these closure objects to be stack-allocated.
! class G1CardTableEntryClosure: public CHeapObj<mtGC> {
  public:
    // Process the card whose card table entry is "card_ptr".  If returns
    // "false", terminate the iteration early.
    virtual bool do_card_ptr(jbyte* card_ptr, uint worker_i) = 0;
  };
  
  // A ptrQueue whose elements are "oops", pointers to object heads.
! class G1DirtyCardQueue: public PtrQueue {
  public:
!   G1DirtyCardQueue(G1DirtyCardQueueSet* qset, bool permanent = false);
  
    // Flush before destroying; queue may be used to capture pending work while
    // doing something else, with auto-flush on completion.
!   ~G1DirtyCardQueue();
  
    // Process queue entries and release resources.
    void flush() { flush_impl(); }
  
    // Compiler support.
    static ByteSize byte_offset_of_index() {
!     return PtrQueue::byte_offset_of_index<G1DirtyCardQueue>();
    }
    using PtrQueue::byte_width_of_index;
  
    static ByteSize byte_offset_of_buf() {
!     return PtrQueue::byte_offset_of_buf<G1DirtyCardQueue>();
    }
    using PtrQueue::byte_width_of_buf;
  
  };
  
! class G1DirtyCardQueueSet: public PtrQueueSet {
!   G1DirtyCardQueue _shared_dirty_card_queue;
  
    // Apply the closure to the elements of "node" from it's index to
    // buffer_size.  If all closure applications return true, then
    // returns true.  Stops processing after the first closure
    // application that returns false, and returns false from this
    // function.  If "consume" is true, the node's index is updated to
    // exclude the processed elements, e.g. up to the element for which
    // the closure returned false.
!   bool apply_closure_to_buffer(G1CardTableEntryClosure* cl,
                                 BufferNode* node,
                                 bool consume,
                                 uint worker_i = 0);
  
    // If there are more than stop_at completed buffers, pop one, apply
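
A minimal sketch, not part of this patch, of the do_card_ptr contract described in the hunk above: a hypothetical G1CardTableEntryClosure subclass that stops buffer iteration early by returning false. The class name and the limit logic are illustrative assumptions; only the base class and the virtual signature come from the header.

  #include "gc/g1/g1DirtyCardQueue.hpp"

  // Hypothetical example closure: processes at most _limit cards, then asks
  // the caller to stop by returning false from do_card_ptr.
  class CardCountingClosure : public G1CardTableEntryClosure {
    size_t _processed;  // cards seen so far
    size_t _limit;      // stop after this many cards
  public:
    CardCountingClosure(size_t limit) : _processed(0), _limit(limit) {}

    virtual bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
      // ... inspect or refine the card table entry at card_ptr ...
      _processed++;
      return _processed < _limit;  // false terminates the iteration early
    }

    size_t processed() const { return _processed; }
  };
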
*** 94,104 ****
    // elements, e.g. up to the element for which the closure returned
    // false) is returned to the completed buffer set.
    //
    // If during_pause is true, stop_at must be zero, and the closure
    // must never return false.
!   bool apply_closure_to_completed_buffer(CardTableEntryClosure* cl,
                                           uint worker_i,
                                           size_t stop_at,
                                           bool during_pause);
  
    bool mut_process_buffer(BufferNode* node);
--- 94,104 ----
    // elements, e.g. up to the element for which the closure returned
    // false) is returned to the completed buffer set.
    //
    // If during_pause is true, stop_at must be zero, and the closure
    // must never return false.
!   bool apply_closure_to_completed_buffer(G1CardTableEntryClosure* cl,
                                           uint worker_i,
                                           size_t stop_at,
                                           bool during_pause);
  
    bool mut_process_buffer(BufferNode* node);
*** 111,125 ****
    jint _processed_buffers_rs_thread;
  
    // Current buffer node used for parallel iteration.
    BufferNode* volatile _cur_par_buffer_node;
  
!   void concatenate_log(DirtyCardQueue& dcq);
  
  public:
!   DirtyCardQueueSet(bool notify_when_complete = true);
!   ~DirtyCardQueueSet();
  
    void initialize(Monitor* cbl_mon,
                    BufferNode::Allocator* allocator,
                    Mutex* lock,
                    bool init_free_ids = false);
--- 111,125 ----
    jint _processed_buffers_rs_thread;
  
    // Current buffer node used for parallel iteration.
    BufferNode* volatile _cur_par_buffer_node;
  
!   void concatenate_log(G1DirtyCardQueue& dcq);
  
  public:
!   G1DirtyCardQueueSet(bool notify_when_complete = true);
!   ~G1DirtyCardQueueSet();
  
    void initialize(Monitor* cbl_mon,
                    BufferNode::Allocator* allocator,
                    Mutex* lock,
                    bool init_free_ids = false);
*** 134,152 ****
    // completed buffers remaining.
    bool refine_completed_buffer_concurrently(uint worker_i, size_t stop_at);
  
    // Apply the given closure to all completed buffers. The given closure's do_card_ptr
    // must never return false. Must only be called during GC.
!   bool apply_closure_during_gc(CardTableEntryClosure* cl, uint worker_i);
  
    void reset_for_par_iteration() { _cur_par_buffer_node = completed_buffers_head(); }
    // Applies the current closure to all completed buffers, non-consumptively.
    // Can be used in parallel, all callers using the iteration state initialized
    // by reset_for_par_iteration.
!   void par_apply_closure_to_all_completed_buffers(CardTableEntryClosure* cl);
  
!   DirtyCardQueue* shared_dirty_card_queue() {
      return &_shared_dirty_card_queue;
    }
  
    // If a full collection is happening, reset partial logs, and ignore
    // completed ones: the full collection will make them all irrelevant.
--- 134,152 ----
    // completed buffers remaining.
    bool refine_completed_buffer_concurrently(uint worker_i, size_t stop_at);
  
    // Apply the given closure to all completed buffers. The given closure's do_card_ptr
    // must never return false. Must only be called during GC.
!   bool apply_closure_during_gc(G1CardTableEntryClosure* cl, uint worker_i);
  
    void reset_for_par_iteration() { _cur_par_buffer_node = completed_buffers_head(); }
    // Applies the current closure to all completed buffers, non-consumptively.
    // Can be used in parallel, all callers using the iteration state initialized
    // by reset_for_par_iteration.
!   void par_apply_closure_to_all_completed_buffers(G1CardTableEntryClosure* cl);
  
!   G1DirtyCardQueue* shared_dirty_card_queue() {
      return &_shared_dirty_card_queue;
    }
  
    // If a full collection is happening, reset partial logs, and ignore
    // completed ones: the full collection will make them all irrelevant.
*** 162,167 ****
      return _processed_buffers_rs_thread;
    }
  
  };
  
! #endif // SHARE_GC_G1_DIRTYCARDQUEUE_HPP
--- 162,167 ----
      return _processed_buffers_rs_thread;
    }
  
  };
  
! #endif // SHARE_GC_G1_G1DIRTYCARDQUEUE_HPP
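
For orientation only, a hedged usage sketch of the non-consumptive parallel iteration API declared above (reset_for_par_iteration followed by par_apply_closure_to_all_completed_buffers). The driver function and its name are hypothetical; the closure's do_card_ptr is assumed never to return false during this phase, as the header comments require.

  // Hypothetical driver, assuming dcqs is the G1DirtyCardQueueSet to scan and
  // cl is a G1CardTableEntryClosure whose do_card_ptr always returns true here.
  void scan_all_completed_buffers(G1DirtyCardQueueSet& dcqs,
                                  G1CardTableEntryClosure* cl) {
    // Single-threaded setup: latch the head of the completed-buffer list.
    dcqs.reset_for_par_iteration();
    // Worker threads may then each call this; they all share the iteration
    // state initialized above, and buffers are visited non-consumptively.
    dcqs.par_apply_closure_to_all_completed_buffers(cl);
  }
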